forked from mirrors/gecko-dev
Bug 1892895 - [devtools] Remove JS CSS Lexer. r=devtools-reviewers,ochameau.
Differential Revision: https://phabricator.services.mozilla.com/D208318
parent e342530686
commit 99792f9e05
13 changed files with 103 additions and 1661 deletions
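Every call site touched below follows the same mechanical migration: the getCSSLexer() factory from the removed JS lexer is replaced by constructing an InspectorCSSParserWrapper, and the former positional booleans become named options. A minimal before/after sketch of the pattern, assembled from the call sites in this diff (the cssText value is illustrative, not from the patch):

const cssText = "a { color: red }";

// Before (removed by this patch): the JS lexer factory, where the second
// boolean opted in to the Rust-backed InspectorCSSParser and the third
// enabled EOF-character tracking.
//   const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
//   const lexer = getCSSLexer(cssText, true, true);

// After: construct the wrapper directly; EOF tracking is a named option.
const {
  InspectorCSSParserWrapper,
} = require("resource://devtools/shared/css/lexer.js");
const lexer = new InspectorCSSParserWrapper(cssText, { trackEOFChars: true });

// Token consumption is unchanged: nextToken() returns null at end of input,
// and each token exposes tokenType, text, startOffset and endOffset.
let token;
while ((token = lexer.nextToken())) {
  // ...
}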
@@ -12,7 +12,9 @@

 "use strict";

-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 const {
   COMMENT_PARSING_HEURISTIC_BYPASS_CHAR,
   escapeCSSComment,
@@ -194,7 +196,7 @@ RuleRewriter.prototype = {
     // into "url(;)" by this code -- due to the way "url(...)" is
     // parsed as a single token.
     text = text.replace(/;$/, "");
-    const lexer = getCSSLexer(text, true, true);
+    const lexer = new InspectorCSSParserWrapper(text, { trackEOFChars: true });

     let result = "";
     let previousOffset = 0;
@@ -12,7 +12,7 @@ loader.lazyRequireGetter(
 );
 loader.lazyRequireGetter(
   this,
-  "getCSSLexer",
+  "InspectorCSSParserWrapper",
   "resource://devtools/shared/css/lexer.js",
   true
 );
@@ -51,7 +51,7 @@ function advanceValidate(keyCode, value, insertionPoint) {
   // value. Otherwise it's been inserted in some spot where it has a
   // valid meaning, like a comment or string.
   value = value.slice(0, insertionPoint) + ";" + value.slice(insertionPoint);
-  const lexer = getCSSLexer(value, true);
+  const lexer = new InspectorCSSParserWrapper(value);
   while (true) {
     const token = lexer.nextToken();
     if (token.endOffset > insertionPoint) {
@@ -6,7 +6,9 @@

 const SPECIALVALUES = new Set(["initial", "inherit", "unset"]);

-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");

 loader.lazyRequireGetter(
   this,
@@ -68,7 +70,7 @@ CssAngle.prototype = {
   },

   get valid() {
-    const token = getCSSLexer(this.authored, true).nextToken();
+    const token = new InspectorCSSParserWrapper(this.authored).nextToken();
     if (!token) {
       return false;
     }
@@ -8,7 +8,9 @@ const {
   angleUtils,
 } = require("resource://devtools/client/shared/css-angle.js");
 const { colorUtils } = require("resource://devtools/shared/css/color.js");
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 const {
   appendText,
 } = require("resource://devtools/client/inspector/shared/utils.js");
@@ -733,7 +735,7 @@ class OutputParser {
     text = text.trim();
     this.#parsed.length = 0;

-    const tokenStream = getCSSLexer(text, true);
+    const tokenStream = new InspectorCSSParserWrapper(text);
     return this.#doParse(text, options, tokenStream, false);
   }
@@ -931,7 +933,7 @@ class OutputParser {
    */
   // eslint-disable-next-line complexity
   #addPolygonPointNodes(coords, container) {
-    const tokenStream = getCSSLexer(coords, true);
+    const tokenStream = new InspectorCSSParserWrapper(coords);
     let token = tokenStream.nextToken();
     let coord = "";
     let i = 0;
@@ -1081,7 +1083,7 @@ class OutputParser {
    */
   // eslint-disable-next-line complexity
   #addCirclePointNodes(coords, container) {
-    const tokenStream = getCSSLexer(coords, true);
+    const tokenStream = new InspectorCSSParserWrapper(coords);
     let token = tokenStream.nextToken();
     let depth = 0;
     let coord = "";
@@ -1242,7 +1244,7 @@ class OutputParser {
    */
   // eslint-disable-next-line complexity
   #addEllipsePointNodes(coords, container) {
-    const tokenStream = getCSSLexer(coords, true);
+    const tokenStream = new InspectorCSSParserWrapper(coords);
     let token = tokenStream.nextToken();
     let depth = 0;
     let coord = "";
@@ -1413,7 +1415,7 @@ class OutputParser {
   // eslint-disable-next-line complexity
   #addInsetPointNodes(coords, container) {
     const insetPoints = ["top", "right", "bottom", "left"];
-    const tokenStream = getCSSLexer(coords, true);
+    const tokenStream = new InspectorCSSParserWrapper(coords);
     let token = tokenStream.nextToken();
     let depth = 0;
     let coord = "";
@@ -1777,7 +1779,9 @@ class OutputParser {
    */
   #sanitizeURL(url) {
     // Re-lex the URL and add any needed termination characters.
-    const urlTokenizer = getCSSLexer(url, true, true);
+    const urlTokenizer = new InspectorCSSParserWrapper(url, {
+      trackEOFChars: true,
+    });
     // Just read until EOF; there will only be a single token.
     while (urlTokenizer.nextToken()) {
       // Nothing.
@@ -1101,7 +1101,7 @@ CSSCompleter.prototype = {
     }

     let prevToken = undefined;
-    const tokensIterator = cssTokenizer(lineText, true);
+    const tokensIterator = cssTokenizer(lineText);
     let found = false;
     const ech = line == caret.line ? caret.ch : 0;
     for (let token of tokensIterator) {
@@ -1165,7 +1165,7 @@ CSSCompleter.prototype = {
       lineText = lineText.substring(0, caret.ch);
     }

-    const tokens = Array.from(cssTokenizer(lineText, true));
+    const tokens = Array.from(cssTokenizer(lineText));
     let found = false;
     for (let i = tokens.length - 1; i >= 0; i--) {
       let token = tokens[i];
@@ -1246,7 +1246,7 @@ CSSCompleter.prototype = {
       };
     } else if (state == CSS_STATES.property) {
       // A property can only be a single word and thus very easy to calculate.
-      const tokens = cssTokenizer(sourceArray[line], true);
+      const tokens = cssTokenizer(sourceArray[line]);
       for (const token of tokens) {
         // Note that, because we're tokenizing a single line, the
         // token's offset is also the column number.
@@ -31,7 +31,9 @@ const {
   PRESETS,
   DEFAULT_PRESET_CATEGORY,
 } = require("resource://devtools/client/shared/widgets/CubicBezierPresets.js");
-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 const XHTML_NS = "http://www.w3.org/1999/xhtml";

 /**
@@ -918,7 +920,7 @@ function parseTimingFunction(value) {
     return PREDEFINED[value];
   }

-  const tokenStream = getCSSLexer(value, true);
+  const tokenStream = new InspectorCSSParserWrapper(value);
   const getNextToken = () => {
     while (true) {
       const token = tokenStream.nextToken();
@@ -1030,7 +1030,7 @@ function tokenizeFilterValue(css) {
   let state = "initial";
   let name;
   let contents;
-  for (const token of cssTokenizer(css, true)) {
+  for (const token of cssTokenizer(css)) {
     switch (state) {
       case "initial":
         if (token.tokenType === "Function") {
@@ -9,7 +9,7 @@
  */

 const EventEmitter = require("devtools/shared/event-emitter");
-const { getCSSLexer } = require("devtools/shared/css/lexer");
+const { InspectorCSSParserWrapper } = require("devtools/shared/css/lexer");
 const { throttle } = require("devtools/shared/throttle");
 const XHTML_NS = "http://www.w3.org/1999/xhtml";
 const SVG_NS = "http://www.w3.org/2000/svg";
@@ -578,7 +578,7 @@ class TimingFunctionPreviewWidget {
  */
 function parseTimingFunction(value) {
   value = value.trim();
-  const tokenStream = getCSSLexer(value, true);
+  const tokenStream = new InspectorCSSParserWrapper(value);
   const getNextToken = () => {
     while (true) {
       const token = tokenStream.nextToken();
@@ -9,7 +9,9 @@ const {
   styleRuleSpec,
 } = require("resource://devtools/shared/specs/style-rule.js");

-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");
 const TrackChangeEmitter = require("resource://devtools/server/actors/utils/track-change-emitter.js");
 const {
   getRuleText,
@@ -1316,7 +1318,7 @@ function getSelectorOffsets(initialText, line, column) {
     line,
     column
   );
-  const lexer = getCSSLexer(text, true);
+  const lexer = new InspectorCSSParserWrapper(text);

   // Search forward for the opening brace.
   let endOffset;
File diff suppressed because it is too large
@@ -11,7 +11,9 @@

 "use strict";

-const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");

 loader.lazyRequireGetter(
   this,
@@ -35,22 +37,18 @@ const COMMENT_PARSING_HEURISTIC_BYPASS_CHAR =
  * CSS tokens. Comment tokens are dropped.
  *
  * @param {String} CSS source string
- * @param {Boolean} useInspectorCSSParser Set to true to use InspectorCSSParser.
  * @yield {CSSToken} The next CSSToken that is lexed
  * @see CSSToken for details about the returned tokens
  */
-function* cssTokenizer(string, useInspectorCSSParser = false) {
-  const lexer = getCSSLexer(string, useInspectorCSSParser);
+function* cssTokenizer(string) {
+  const lexer = new InspectorCSSParserWrapper(string);
   while (true) {
     const token = lexer.nextToken();
     if (!token) {
       break;
     }
     // None of the existing consumers want comments.
-    if (
-      token.tokenType !== "comment" ||
-      (useInspectorCSSParser && token.tokenType !== "Comment")
-    ) {
+    if (token.tokenType !== "Comment") {
       yield token;
     }
   }
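The cssTokenizer() generator above keeps its callers unchanged (see the CSSCompleter and tokenizeFilterValue hunks in this commit). A short usage sketch, assuming the generator is still exported from devtools/shared/css/parsing-utils.js and using an illustrative declaration string:

const {
  cssTokenizer,
} = require("resource://devtools/shared/css/parsing-utils.js");

// Comment tokens are filtered out by the generator itself, so callers only
// see tokens such as "Ident", "Colon", "WhiteSpace" or "Dimension".
for (const token of cssTokenizer("margin: 10px /* gutter */")) {
  console.log(token.tokenType, token.text);
}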
@@ -76,7 +74,7 @@ function* cssTokenizer(string, useInspectorCSSParser = false) {
  * line and column information.
  */
 function cssTokenizerWithLineColumn(string) {
-  const lexer = getCSSLexer(string, true);
+  const lexer = new InspectorCSSParserWrapper(string);
   const result = [];
   let prevToken = undefined;
   while (true) {
@@ -299,7 +297,9 @@ function parseDeclarationsInternal(
     throw new Error("empty input string");
   }

-  const lexer = getCSSLexer(inputString, true, true);
+  const lexer = new InspectorCSSParserWrapper(inputString, {
+    trackEOFChars: true,
+  });

   let declarations = [getEmptyDeclaration()];
   let lastProp = declarations[0];
@@ -642,11 +642,7 @@ function parsePseudoClassesAndAttributes(value) {

   // See InspectorCSSToken dictionnary in InspectorUtils.webidl for more information
   // about the tokens.
-  const tokensIterator = cssTokenizer(
-    value,
-    // useInspectorCSSParser
-    true
-  );
+  const tokensIterator = cssTokenizer(value);
   const result = [];
   let current = "";
   let functionCount = 0;
@@ -17,7 +17,7 @@ const MAX_DATA_URL_LENGTH = 40;

 loader.lazyRequireGetter(
   this,
-  "getCSSLexer",
+  "InspectorCSSParserWrapper",
   "resource://devtools/shared/css/lexer.js",
   true
 );
@@ -290,7 +290,7 @@ function prettifyCSS(text, ruleCount) {
   // minified file.
   let indent = "";
   let indentLevel = 0;
-  const lexer = getCSSLexer(text, true);
+  const lexer = new InspectorCSSParserWrapper(text);
   // List of mappings of token positions from original source to prettified source.
   const mappings = [];
   // Line and column offsets used to shift the token positions after prettyfication.
@@ -5,28 +5,15 @@

 "use strict";

-const jsLexer = require("resource://devtools/shared/css/lexer.js");
+const {
+  InspectorCSSParserWrapper,
+} = require("resource://devtools/shared/css/lexer.js");

 add_task(function test_lexer() {
   const LEX_TESTS = [
-    [
-      "simple",
-      ["ident:simple"],
-      [{ tokenType: "Ident", text: "simple", value: "simple" }],
-    ],
+    ["simple", [{ tokenType: "Ident", text: "simple", value: "simple" }]],
     [
       "simple: { hi; }",
-      [
-        "ident:simple",
-        "symbol::",
-        "whitespace",
-        "symbol:{",
-        "whitespace",
-        "ident:hi",
-        "symbol:;",
-        "whitespace",
-        "symbol:}",
-      ],
       [
         { tokenType: "Ident", text: "simple", value: "simple" },
         { tokenType: "Colon", text: ":" },
@@ -41,30 +28,18 @@ add_task(function test_lexer() {
     ],
     [
       "/* whatever */",
-      ["comment"],
       [{ tokenType: "Comment", text: "/* whatever */", value: " whatever " }],
     ],
     [
       "'string'",
-      ["string:string"],
       [{ tokenType: "QuotedString", text: "'string'", value: "string" }],
     ],
     [
       '"string"',
-      ["string:string"],
       [{ tokenType: "QuotedString", text: `"string"`, value: "string" }],
     ],
     [
       "rgb(1,2,3)",
-      [
-        "function:rgb",
-        "number",
-        "symbol:,",
-        "number",
-        "symbol:,",
-        "number",
-        "symbol:)",
-      ],
       [
         { tokenType: "Function", text: "rgb(", value: "rgb" },
         { tokenType: "Number", text: "1", number: 1 },
@@ -75,30 +50,16 @@ add_task(function test_lexer() {
         { tokenType: "CloseParenthesis", text: ")" },
       ],
     ],
-    [
-      "@media",
-      ["at:media"],
-      [{ tokenType: "AtKeyword", text: "@media", value: "media" }],
-    ],
-    [
-      "#hibob",
-      ["id:hibob"],
-      [{ tokenType: "IDHash", text: "#hibob", value: "hibob" }],
-    ],
-    ["#123", ["hash:123"], [{ tokenType: "Hash", text: "#123", value: "123" }]],
+    ["@media", [{ tokenType: "AtKeyword", text: "@media", value: "media" }]],
+    ["#hibob", [{ tokenType: "IDHash", text: "#hibob", value: "hibob" }]],
+    ["#123", [{ tokenType: "Hash", text: "#123", value: "123" }]],
     [
       "23px",
-      ["dimension:px"],
       [{ tokenType: "Dimension", text: "23px", number: 23, unit: "px" }],
     ],
-    [
-      "23%",
-      ["percentage"],
-      [{ tokenType: "Percentage", text: "23%", number: 0.23 }],
-    ],
+    ["23%", [{ tokenType: "Percentage", text: "23%", number: 0.23 }]],
     [
       "url(http://example.com)",
-      ["url:http://example.com"],
       [
         {
           tokenType: "UnquotedUrl",
@@ -109,7 +70,6 @@ add_task(function test_lexer() {
     ],
     [
       "url('http://example.com')",
-      ["url:http://example.com"],
       [
         { tokenType: "Function", text: "url(", value: "url" },
         {
@@ -122,7 +82,6 @@ add_task(function test_lexer() {
     ],
     [
       "url( 'http://example.com' )",
-      ["url:http://example.com"],
       [
         { tokenType: "Function", text: "url(", value: "url" },
         { tokenType: "WhiteSpace", text: " " },
@@ -138,7 +97,6 @@ add_task(function test_lexer() {
     // In CSS Level 3, this is an ordinary URL, not a BAD_URL.
     [
       "url(http://example.com",
-      ["url:http://example.com"],
       [
         {
           tokenType: "UnquotedUrl",
@@ -149,7 +107,6 @@ add_task(function test_lexer() {
     ],
     [
       "url(http://example.com @",
-      ["bad_url:http://example.com"],
       [
         {
           tokenType: "BadUrl",
@@ -160,34 +117,23 @@ add_task(function test_lexer() {
     ],
     [
       "quo\\ting",
-      ["ident:quoting"],
       [{ tokenType: "Ident", text: "quo\\ting", value: "quoting" }],
     ],
     [
       "'bad string\n",
-      ["bad_string:bad string", "whitespace"],
       [
         { tokenType: "BadString", text: "'bad string", value: "bad string" },
         { tokenType: "WhiteSpace", text: "\n" },
       ],
     ],
-    ["~=", ["includes"], [{ tokenType: "IncludeMatch", text: "~=" }]],
-    ["|=", ["dashmatch"], [{ tokenType: "DashMatch", text: "|=" }]],
-    ["^=", ["beginsmatch"], [{ tokenType: "PrefixMatch", text: "^=" }]],
-    ["$=", ["endsmatch"], [{ tokenType: "SuffixMatch", text: "$=" }]],
-    ["*=", ["containsmatch"], [{ tokenType: "SubstringMatch", text: "*=" }]],
+    ["~=", [{ tokenType: "IncludeMatch", text: "~=" }]],
+    ["|=", [{ tokenType: "DashMatch", text: "|=" }]],
+    ["^=", [{ tokenType: "PrefixMatch", text: "^=" }]],
+    ["$=", [{ tokenType: "SuffixMatch", text: "$=" }]],
+    ["*=", [{ tokenType: "SubstringMatch", text: "*=" }]],

     [
       "<!-- html comment -->",
-      [
-        "htmlcomment",
-        "whitespace",
-        "ident:html",
-        "whitespace",
-        "ident:comment",
-        "whitespace",
-        "htmlcomment",
-      ],
       [
         { tokenType: "CDO", text: "<!--" },
         { tokenType: "WhiteSpace", text: " " },
@@ -203,44 +149,36 @@ add_task(function test_lexer() {
     // unterminated comments are just comments.
     [
       "/* bad comment",
-      ["comment"],
       [{ tokenType: "Comment", text: "/* bad comment", value: " bad comment" }],
     ],
   ];

-  const test = (cssText, useInspectorCSSParser, tokenTypes) => {
-    const lexer = jsLexer.getCSSLexer(cssText, useInspectorCSSParser);
+  const test = (cssText, tokenTypes) => {
+    const lexer = new InspectorCSSParserWrapper(cssText);
     let reconstructed = "";
     let lastTokenEnd = 0;
     let i = 0;
     let token;
     while ((token = lexer.nextToken())) {
-      let combined = token.tokenType;
-      if (token.text) {
-        combined += ":" + token.text;
-      }
-      if (useInspectorCSSParser) {
-        const expectedToken = tokenTypes[i];
-        Assert.deepEqual(
-          {
-            tokenType: token.tokenType,
-            text: token.text,
-            value: token.value,
-            number: token.number,
-            unit: token.unit,
-          },
-          {
-            tokenType: expectedToken.tokenType,
-            text: expectedToken.text,
-            value: expectedToken.value ?? null,
-            number: expectedToken.number ?? null,
-            unit: expectedToken.unit ?? null,
-          },
-          `Got expected token #${i} for "${cssText}"`
-        );
-      } else {
-        equal(combined, tokenTypes[i]);
-      }
+      const expectedToken = tokenTypes[i];
+      Assert.deepEqual(
+        {
+          tokenType: token.tokenType,
+          text: token.text,
+          value: token.value,
+          number: token.number,
+          unit: token.unit,
+        },
+        {
+          tokenType: expectedToken.tokenType,
+          text: expectedToken.text,
+          value: expectedToken.value ?? null,
+          number: expectedToken.number ?? null,
+          unit: expectedToken.unit ?? null,
+        },
+        `Got expected token #${i} for "${cssText}"`
+      );
       Assert.greater(token.endOffset, token.startOffset);
       equal(token.startOffset, lastTokenEnd);
       lastTokenEnd = token.endOffset;
@@ -253,41 +191,29 @@ add_task(function test_lexer() {
     equal(reconstructed, cssText);
   };

-  for (const [cssText, jsTokenTypes, rustTokenTypes] of LEX_TESTS) {
-    info(`Test "${cssText}" with js-based lexer`);
-    test(cssText, false, jsTokenTypes);
-
-    info(`Test "${cssText}" with rust-based lexer`);
-    test(cssText, true, rustTokenTypes);
+  for (const [cssText, rustTokenTypes] of LEX_TESTS) {
+    info(`Test "${cssText}"`);
+    test(cssText, rustTokenTypes);
   }
 });

 add_task(function test_lexer_linecol() {
   const LINECOL_TESTS = [
-    ["simple", ["ident:0:0", ":0:6"], ["Ident:0:0", ":0:6"]],
-    [
-      "\n stuff",
-      ["whitespace:0:0", "ident:1:4", ":1:9"],
-      ["WhiteSpace:0:0", "Ident:1:4", ":1:9"],
-    ],
+    ["simple", ["Ident:0:0", ":0:6"]],
+    ["\n stuff", ["WhiteSpace:0:0", "Ident:1:4", ":1:9"]],
     [
       '"string with \\\nnewline" \r\n',
-      ["string:0:0", "whitespace:1:8", ":2:0"],
       ["QuotedString:0:0", "WhiteSpace:1:8", ":2:0"],
     ],
   ];

-  const test = (cssText, useInspectorCSSParser, locations) => {
-    const lexer = jsLexer.getCSSLexer(cssText, useInspectorCSSParser);
+  const test = (cssText, locations) => {
+    const lexer = new InspectorCSSParserWrapper(cssText);
     let i = 0;
     let token;
     const testLocation = () => {
-      const startLine = useInspectorCSSParser
-        ? lexer.parser.lineNumber
-        : lexer.lineNumber;
-      const startColumn = useInspectorCSSParser
-        ? lexer.parser.columnNumber
-        : lexer.columnNumber;
+      const startLine = lexer.parser.lineNumber;
+      const startColumn = lexer.parser.columnNumber;

       // We do this in a bit of a funny way so that we can also test the
       // location of the EOF.
@@ -308,12 +234,9 @@ add_task(function test_lexer_linecol() {
     equal(i, locations.length);
   };

-  for (const [cssText, jsLocations, rustLocations] of LINECOL_TESTS) {
-    info(`Test "${cssText}" with js-based lexer`);
-    test(cssText, false, jsLocations);
-
-    info(`Test "${cssText}" with rust-based lexer`);
-    test(cssText, true, rustLocations);
+  for (const [cssText, rustLocations] of LINECOL_TESTS) {
+    info(`Test "${cssText}"`);
+    test(cssText, rustLocations);
   }
 });
@@ -340,17 +263,13 @@ add_task(function test_lexer_eofchar() {
     ["'\\", "\\'", "'", ""],
   ];

-  const test = (
-    cssText,
-    useInspectorCSSParser,
-    expectedAppend,
-    expectedNoAppend,
-    argText
-  ) => {
+  const test = (cssText, expectedAppend, expectedNoAppend, argText) => {
     if (!expectedNoAppend) {
       expectedNoAppend = expectedAppend;
     }
-    const lexer = jsLexer.getCSSLexer(cssText, useInspectorCSSParser, true);
+    const lexer = new InspectorCSSParserWrapper(cssText, {
+      trackEOFChars: true,
+    });
     while (lexer.nextToken()) {
       // We don't need to do anything with the tokens. We only want to consume the iterator
       // so we can safely call performEOFFixup.
@@ -371,10 +290,7 @@ add_task(function test_lexer_eofchar() {
     expectedNoAppend,
     argText = cssText,
   ] of EOFCHAR_TESTS) {
-    info(`Test "${cssText}" with js-based lexer`);
-    test(cssText, false, expectedAppend, expectedNoAppend, argText);
-
-    info(`Test "${cssText}" with rust-based lexer`);
-    test(cssText, true, expectedAppend, expectedNoAppend, argText);
+    info(`Test "${cssText}"`);
+    test(cssText, expectedAppend, expectedNoAppend, argText);
   }
 });