Bug 1454696 - Run eslint --fix for prefer-const;r=yulia

MozReview-Commit-ID: F6xUXCgdRE4
Julian Descottes
2018-06-01 12:36:09 +02:00
parent 281dfc0c1f
commit 82711b9a8f
2873 changed files with 24923 additions and 24923 deletions
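The change is mechanical: ESLint's prefer-const rule flags every `let` binding that is never reassigned, and running the linter with --fix rewrites those declarations to `const` across the tree, which is what produced the let/const pairs in the diff below. As a rough sketch (the actual tree-wide ESLint configuration is not part of this commit), enabling the rule looks like this:

// Hypothetical minimal .eslintrc.js -- illustrative only, not the
// configuration used in mozilla-central.
module.exports = {
  rules: {
    // Report `let` declarations that are never reassigned after
    // initialization; `eslint --fix` rewrites them to `const`.
    "prefer-const": "error",
  },
};

With the rule enabled, a fix pass such as `eslint --fix <path>` performs the rewrite automatically; bindings that are reassigned later (for example the `prevToken`, `declarations`, and `current` variables below) are left as `let`.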

@@ -49,9 +49,9 @@ const COMMENT_PARSING_HEURISTIC_BYPASS_CHAR = "!";
* @see CSSToken for details about the returned tokens
*/
function* cssTokenizer(string) {
-let lexer = getCSSLexer(string);
+const lexer = getCSSLexer(string);
while (true) {
-let token = lexer.nextToken();
+const token = lexer.nextToken();
if (!token) {
break;
}
@@ -82,13 +82,13 @@ function* cssTokenizer(string) {
* line and column information.
*/
function cssTokenizerWithLineColumn(string) {
-let lexer = getCSSLexer(string);
-let result = [];
+const lexer = getCSSLexer(string);
+const result = [];
let prevToken = undefined;
while (true) {
-let token = lexer.nextToken();
-let lineNumber = lexer.lineNumber;
-let columnNumber = lexer.columnNumber;
+const token = lexer.nextToken();
+const lineNumber = lexer.lineNumber;
+const columnNumber = lexer.columnNumber;
if (prevToken) {
prevToken.loc.end = {
@@ -105,7 +105,7 @@ function cssTokenizerWithLineColumn(string) {
// We've already dealt with the previous token's location.
prevToken = undefined;
} else {
-let startLoc = {
+const startLoc = {
line: lineNumber,
column: columnNumber
};
@@ -129,7 +129,7 @@ function cssTokenizerWithLineColumn(string) {
* @return {String} the escaped result
*/
function escapeCSSComment(inputString) {
-let result = inputString.replace(/\/(\\*)\*/g, "/\\$1*");
+const result = inputString.replace(/\/(\\*)\*/g, "/\\$1*");
return result.replace(/\*(\\*)\//g, "*\\$1/");
}
@@ -143,7 +143,7 @@ function escapeCSSComment(inputString) {
* @return {String} the un-escaped result
*/
function unescapeCSSComment(inputString) {
-let result = inputString.replace(/\/\\(\\*)\*/g, "/$1*");
+const result = inputString.replace(/\/\\(\\*)\*/g, "/$1*");
return result.replace(/\*\\(\\*)\//g, "*$1/");
}
@@ -178,7 +178,7 @@ function parseCommentDeclarations(isCssPropertyKnown, commentText, startOffset,
commentText = commentText.substring(1);
}
-let rewrittenText = unescapeCSSComment(commentText);
+const rewrittenText = unescapeCSSComment(commentText);
// We might have rewritten an embedded comment. For example
// /\* ... *\/ would turn into /* ... */.
@@ -195,11 +195,11 @@ function parseCommentDeclarations(isCssPropertyKnown, commentText, startOffset,
//
// Note we allocate one extra entry because we can see an ending
// offset that is equal to the length.
-let rewrites = new Array(rewrittenText.length + 1).fill(0);
+const rewrites = new Array(rewrittenText.length + 1).fill(0);
-let commentRe = /\/\\*\*|\*\\*\//g;
+const commentRe = /\/\\*\*|\*\\*\//g;
while (true) {
-let matchData = commentRe.exec(rewrittenText);
+const matchData = commentRe.exec(rewrittenText);
if (!matchData) {
break;
}
@@ -222,9 +222,9 @@ function parseCommentDeclarations(isCssPropertyKnown, commentText, startOffset,
// seem worthwhile to support declarations in comments-in-comments
// here, as there's no way to generate those using the tools, and
// users would be crazy to write such things.
-let newDecls = parseDeclarationsInternal(isCssPropertyKnown, rewrittenText,
+const newDecls = parseDeclarationsInternal(isCssPropertyKnown, rewrittenText,
false, true, commentOverride);
-for (let decl of newDecls) {
+for (const decl of newDecls) {
decl.offsets[0] = rewrites[decl.offsets[0]];
decl.offsets[1] = rewrites[decl.offsets[1]];
decl.colonOffsets[0] = rewrites[decl.colonOffsets[0]];
@@ -252,7 +252,7 @@ function getEmptyDeclaration() {
* Like trim, but only trims CSS-allowed whitespace.
*/
function cssTrim(str) {
-let match = /^[ \t\r\n\f]*(.*?)[ \t\r\n\f]*$/.exec(str);
+const match = /^[ \t\r\n\f]*(.*?)[ \t\r\n\f]*$/.exec(str);
if (match) {
return match[1];
}
@@ -263,7 +263,7 @@ function cssTrim(str) {
* Like trimRight, but only trims CSS-allowed whitespace.
*/
function cssTrimRight(str) {
-let match = /^(.*?)[ \t\r\n\f]*$/.exec(str);
+const match = /^(.*?)[ \t\r\n\f]*$/.exec(str);
if (match) {
return match[1];
}
@@ -297,7 +297,7 @@ function parseDeclarationsInternal(isCssPropertyKnown, inputString,
throw new Error("empty input string");
}
-let lexer = getCSSLexer(inputString);
+const lexer = getCSSLexer(inputString);
let declarations = [getEmptyDeclaration()];
let lastProp = declarations[0];
@@ -313,7 +313,7 @@ function parseDeclarationsInternal(isCssPropertyKnown, inputString,
let importantWS = false;
let current = "";
while (true) {
-let token = lexer.nextToken();
+const token = lexer.nextToken();
if (!token) {
break;
}
@@ -411,14 +411,14 @@ function parseDeclarationsInternal(isCssPropertyKnown, inputString,
}
} else if (token.tokenType === "comment") {
if (parseComments && !lastProp.name && !lastProp.value) {
-let commentText = inputString.substring(token.startOffset + 2,
+const commentText = inputString.substring(token.startOffset + 2,
token.endOffset - 2);
-let newDecls = parseCommentDeclarations(isCssPropertyKnown, commentText,
+const newDecls = parseCommentDeclarations(isCssPropertyKnown, commentText,
token.startOffset,
token.endOffset);
// Insert the new declarations just before the final element.
-let lastDecl = declarations.pop();
+const lastDecl = declarations.pop();
declarations = [...declarations, ...newDecls, lastDecl];
} else {
current = current.trimRight() + " ";
@@ -455,7 +455,7 @@ function parseDeclarationsInternal(isCssPropertyKnown, inputString,
current += "!";
}
lastProp.value = cssTrim(current);
-let terminator = lexer.performEOFFixup("", true);
+const terminator = lexer.performEOFFixup("", true);
lastProp.terminator = terminator + ";";
// If the input was unterminated, attribute the remainder to
// this property. This avoids some bad behavior when rewriting
@@ -637,7 +637,7 @@ RuleRewriter.prototype = {
getIndentation: function(string, offset) {
let originalOffset = offset;
for (--offset; offset >= 0; --offset) {
-let c = string[offset];
+const c = string[offset];
if (c === "\r" || c === "\n" || c === "\f") {
return string.substring(offset + 1, originalOffset);
}
@@ -674,15 +674,15 @@ RuleRewriter.prototype = {
// into "url(;)" by this code -- due to the way "url(...)" is
// parsed as a single token.
text = text.replace(/;$/, "");
-let lexer = getCSSLexer(text);
+const lexer = getCSSLexer(text);
let result = "";
let previousOffset = 0;
-let parenStack = [];
+const parenStack = [];
let anySanitized = false;
// Push a closing paren on the stack.
-let pushParen = (token, closer) => {
+const pushParen = (token, closer) => {
result = result + text.substring(previousOffset, token.startOffset) +
text.substring(token.startOffset, token.endOffset);
// We set the location of the paren in a funny way, to handle
@@ -693,9 +693,9 @@ RuleRewriter.prototype = {
};
// Pop a closing paren from the stack.
-let popSomeParens = (closer) => {
+const popSomeParens = (closer) => {
while (parenStack.length > 0) {
-let paren = parenStack.pop();
+const paren = parenStack.pop();
if (paren.closer === closer) {
return true;
@@ -711,7 +711,7 @@ RuleRewriter.prototype = {
};
while (true) {
-let token = lexer.nextToken();
+const token = lexer.nextToken();
if (!token) {
break;
}
@@ -762,7 +762,7 @@ RuleRewriter.prototype = {
// Copy out any remaining text, then any needed terminators.
result += text.substring(previousOffset, text.length);
-let eofFixup = lexer.performEOFFixup("", true);
+const eofFixup = lexer.performEOFFixup("", true);
if (eofFixup) {
anySanitized = true;
result += eofFixup;
@@ -802,14 +802,14 @@ RuleRewriter.prototype = {
return;
}
-let termDecl = this.declarations[index];
+const termDecl = this.declarations[index];
let endIndex = termDecl.offsets[1];
// Due to an oddity of the lexer, we might have gotten a bit of
// extra whitespace in a trailing bad_url token -- so be sure to
// skip that as well.
endIndex = this.skipWhitespaceBackward(this.result, endIndex) + 1;
-let trailingText = this.result.substring(endIndex);
+const trailingText = this.result.substring(endIndex);
if (termDecl.terminator) {
// Insert the terminator just at the end of the declaration,
// before any trailing whitespace.
@@ -844,7 +844,7 @@ RuleRewriter.prototype = {
* @return {String} The sanitized text.
*/
sanitizeText: function(text, index) {
-let [anySanitized, sanitizedText] = this.sanitizePropertyValue(text);
+const [anySanitized, sanitizedText] = this.sanitizePropertyValue(text);
if (anySanitized) {
this.changedDeclarations[index] = sanitizedText;
}
@@ -880,7 +880,7 @@ RuleRewriter.prototype = {
let copyOffset = decl.offsets[1];
if (isEnabled) {
// Enable it. First see if the comment start can be deleted.
-let commentStart = decl.commentOffsets[0];
+const commentStart = decl.commentOffsets[0];
if (EMPTY_COMMENT_START_RX.test(this.result.substring(commentStart))) {
this.result = this.result.substring(0, commentStart);
} else {
@@ -889,7 +889,7 @@ RuleRewriter.prototype = {
// Insert the name and value separately, so we can report
// sanitization changes properly.
-let commentNamePart =
+const commentNamePart =
this.inputString.substring(decl.offsets[0],
decl.colonOffsets[1]);
this.result += unescapeCSSComment(commentNamePart);
@@ -903,7 +903,7 @@ RuleRewriter.prototype = {
this.result += this.sanitizeText(newText, index) + ";";
// See if the comment end can be deleted.
-let trailingText = this.inputString.substring(decl.offsets[1]);
+const trailingText = this.inputString.substring(decl.offsets[1]);
if (EMPTY_COMMENT_END_RX.test(trailingText)) {
copyOffset = decl.commentOffsets[1];
} else {
@@ -912,7 +912,7 @@ RuleRewriter.prototype = {
} else {
// Disable it. Note that we use our special comment syntax
// here.
-let declText = this.inputString.substring(decl.offsets[0],
+const declText = this.inputString.substring(decl.offsets[0],
decl.offsets[1]);
this.result += "/*" + COMMENT_PARSING_HEURISTIC_BYPASS_CHAR +
" " + escapeCSSComment(declText) + " */";
@@ -968,7 +968,7 @@ RuleRewriter.prototype = {
// is actually used.
let savedWhitespace = "";
if (this.hasNewLine) {
-let wsOffset = this.skipWhitespaceBackward(this.result,
+const wsOffset = this.skipWhitespaceBackward(this.result,
this.result.length);
if (this.result[wsOffset] === "\r" || this.result[wsOffset] === "\n") {
savedWhitespace = this.result.substring(wsOffset + 1);
@@ -1082,12 +1082,12 @@ RuleRewriter.prototype = {
// bother with this if we're looking at sources that already
// have a newline somewhere.
if (this.hasNewLine) {
-let nlOffset = this.skipWhitespaceBackward(this.result,
+const nlOffset = this.skipWhitespaceBackward(this.result,
this.decl.offsets[0]);
if (nlOffset < 0 || this.result[nlOffset] === "\r" ||
this.result[nlOffset] === "\n") {
-let trailingText = this.inputString.substring(copyOffset);
-let match = BLANK_LINE_RX.exec(trailingText);
+const trailingText = this.inputString.substring(copyOffset);
+const match = BLANK_LINE_RX.exec(trailingText);
if (match) {
this.result = this.result.substring(0, nlOffset + 1);
copyOffset += match[0].length;
@@ -1159,14 +1159,14 @@ function parsePseudoClassesAndAttributes(value) {
throw new Error("empty input string");
}
-let tokens = cssTokenizer(value);
-let result = [];
+const tokens = cssTokenizer(value);
+const result = [];
let current = "";
let functionCount = 0;
let hasAttribute = false;
let hasColon = false;
-for (let token of tokens) {
+for (const token of tokens) {
if (token.tokenType === "ident") {
current += value.substring(token.startOffset, token.endOffset);
@@ -1253,7 +1253,7 @@ function parsePseudoClassesAndAttributes(value) {
* @return {Object} an object with 'value' and 'priority' properties.
*/
function parseSingleValue(isCssPropertyKnown, value) {
-let declaration = parseDeclarations(isCssPropertyKnown,
+const declaration = parseDeclarations(isCssPropertyKnown,
"a: " + value + ";")[0];
return {
value: declaration ? declaration.value : "",