Bug 1879120 - Remove all python deprecation warnings: invalid escape sequence r=ahochheiden,webdriver-reviewers,perftest-reviewers,afinder UPGRADE_NSPR_RELEASE

Differential Revision: https://phabricator.services.mozilla.com/D201012
serge-sans-paille 2024-02-26 08:26:38 +00:00
parent 051eb870c9
commit d07a1a5ec5
69 changed files with 132 additions and 130 deletions
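
The whole change applies one mechanical pattern: regular expressions, Windows registry paths, and sed/shell snippets written as plain string literals with escapes such as \d, \s, or \M are either given an r/rb prefix or have their backslashes doubled. Both forms produce a byte-for-byte identical string, so no pattern changes behavior; they only stop Python from flagging the literal. A minimal sketch of the warning and the two equivalent fixes (not part of the patch; the names are illustrative):

import re
import warnings

warnings.simplefilter("error")  # surface compile-time warnings instead of hiding them

# "\d" is not a recognized escape in a plain str literal; CPython flags it when the
# source is compiled: DeprecationWarning on Python 3.6-3.11, SyntaxWarning on 3.12+,
# and it is reported as a SyntaxError once the warning is promoted to an error.
try:
    compile(r'BAD = re.compile("\d+")', "<demo>", "exec")
except (SyntaxError, SyntaxWarning, DeprecationWarning) as exc:
    print("flagged:", exc)

# Either fix yields the exact same pattern string, so the regex behavior is unchanged.
GOOD_RAW = re.compile(r"\d+")      # raw string: the backslash reaches re verbatim
GOOD_ESCAPED = re.compile("\\d+")  # doubled backslash: same two characters
assert GOOD_RAW.pattern == GOOD_ESCAPED.pattern == "\\d+"

Raw strings (r"...", rb"..." for bytes patterns) are the form used in most of the hunks below; doubled backslashes are used where a raw string would be awkward, e.g. inside .format() templates.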

@@ -20,9 +20,9 @@ from mozfile import NamedTemporaryFile, TemporaryDirectory
 from mozprofile.permissions import ServerLocations
 dbFiles = [
-re.compile("^cert[0-9]+\.db$"),
+re.compile(r"^cert[0-9]+\.db$"),
-re.compile("^key[0-9]+\.db$"),
+re.compile(r"^key[0-9]+\.db$"),
-re.compile("^secmod\.db$"),
+re.compile(r"^secmod\.db$"),
 ]
@@ -77,7 +77,7 @@ def writeCertspecForServerLocations(fd):
 i for i in iter(locations) if i.scheme == "https" and "nocert" not in i.options
 ]:
 customCertOption = False
-customCertRE = re.compile("^cert=(?:\w+)")
+customCertRE = re.compile(r"^cert=(?:\w+)")
 for _ in [i for i in loc.options if customCertRE.match(i)]:
 customCertOption = True
 break

@@ -48,7 +48,7 @@ def filter_git_changes(github_path, commit_sha, diff_filter):
 # out the excluded directory paths (note the lack of trailing '$'
 # in the regex).
 regex_excludes = "|".join(
-["^(M|A|D|R\d\d\d)\t{}".format(i) for i in exclude_dir_list]
+["^(M|A|D|R\\d\\d\\d)\t{}".format(i) for i in exclude_dir_list]
 )
 files_not_excluded = [
 path for path in changed_files if not re.findall(regex_excludes, path)

@@ -52,7 +52,7 @@ def save_patch_stack(
 # remove the commit summary from the file name
 patches_to_rename = os.listdir(patch_directory)
 for file in patches_to_rename:
-shortened_name = re.sub("^(\d\d\d\d)-.*\.patch", "\\1.patch", file)
+shortened_name = re.sub(r"^(\d\d\d\d)-.*\.patch", "\\1.patch", file)
 os.rename(
 os.path.join(patch_directory, file),
 os.path.join(patch_directory, shortened_name),

@@ -5,7 +5,7 @@
 assert __name__ == "__main__"
-"""
+r"""
 To update ANGLE in Gecko, use Windows with git-bash, and setup depot_tools, python2, and
 python3. Because depot_tools expects `python` to be `python2` (shame!), python2 must come
 before python3 in your path.

@@ -9,7 +9,7 @@ import sys
 f = open(sys.argv[1] if len(sys.argv) > 1 else "StandardizedVariants.txt")
 line = f.readline()
-m = re.compile("^# (StandardizedVariants(-\d+(\.\d+)*)?\.txt)").search(line)
+m = re.compile(r"^# (StandardizedVariants(-\d+(\.\d+)*)?\.txt)").search(line)
 fileversion = m.group(1)
 vsdict = {}
 r = re.compile(

@@ -2213,7 +2213,7 @@ def listIANAFiles(tzdataDir):
 def readIANAFiles(tzdataDir, files):
 """Read all IANA time zone files from the given iterable."""
-nameSyntax = "[\w/+\-]+"
+nameSyntax = r"[\w/+\-]+"
 pZone = re.compile(r"Zone\s+(?P<name>%s)\s+.*" % nameSyntax)
 pLink = re.compile(
 r"Link\s+(?P<target>%s)\s+(?P<name>%s)(?:\s+#.*)?" % (nameSyntax, nameSyntax)
@@ -2310,7 +2310,7 @@ def readICUResourceFile(filename):
 maybeMultiComments = r"(?:/\*[^*]*\*/)*"
 maybeSingleComment = r"(?://.*)?"
 lineStart = "^%s" % maybeMultiComments
-lineEnd = "%s\s*%s$" % (maybeMultiComments, maybeSingleComment)
+lineEnd = r"%s\s*%s$" % (maybeMultiComments, maybeSingleComment)
 return re.compile(r"\s*".join(chain([lineStart], args, [lineEnd])))
 tableName = r'(?P<quote>"?)(?P<name>.+?)(?P=quote)'
@@ -2554,7 +2554,7 @@ def icuTzDataVersion(icuTzDir):
 zoneinfo = os.path.join(icuTzDir, "zoneinfo64.txt")
 if not os.path.isfile(zoneinfo):
 raise RuntimeError("file not found: %s" % zoneinfo)
-version = searchInFile("^//\s+tz version:\s+([0-9]{4}[a-z])$", zoneinfo)
+version = searchInFile(r"^//\s+tz version:\s+([0-9]{4}[a-z])$", zoneinfo)
 if version is None:
 raise RuntimeError(
 "%s does not contain a valid tzdata version string" % zoneinfo
@@ -3711,7 +3711,7 @@ const allUnits = {};
 """.format(
 all_units_array
 )
-+ """
++ r"""
 // Test only sanctioned unit identifiers are allowed.
 for (const typeAndUnit of allUnits) {

@@ -22,7 +22,7 @@ ALIGNMENT_COLUMN = 20
 # The maximum column for comment
 MAX_CHARS_PER_LINE = 80
-stack_comment_pat = re.compile("^( *//) *(\[stack\].*)$")
+stack_comment_pat = re.compile(r"^( *//) *(\[stack\].*)$")
 def align_stack_comment(path):

@@ -271,7 +271,7 @@ def implemented_types(t):
 yield t2
-template_regexp = re.compile("([\w_:]+)<")
+template_regexp = re.compile(r"([\w_:]+)<")
 def is_struct_or_union(t):

@@ -40,9 +40,9 @@ def _relpath(path, start=None):
 os.path.relpath = _relpath
 # Characters that need to be escaped when used in shell words.
-shell_need_escapes = re.compile("[^\w\d%+,-./:=@'\"]", re.DOTALL)
+shell_need_escapes = re.compile("[^\\w\\d%+,-./:=@'\"]", re.DOTALL)
 # Characters that need to be escaped within double-quoted strings.
-shell_dquote_escapes = re.compile('[^\w\d%+,-./:=@"]', re.DOTALL)
+shell_dquote_escapes = re.compile('[^\\w\\d%+,-./:=@"]', re.DOTALL)
 def make_shell_cmd(l):

@@ -13,10 +13,10 @@ run_fragment("ExecutableAllocator.onepool")
 reExecPool = "ExecutablePool [a-f0-9]{8,}-[a-f0-9]{8,}"
 assert_regexp_pretty("pool", reExecPool)
-assert_regexp_pretty("execAlloc", "ExecutableAllocator\(\[" + reExecPool + "\]\)")
+assert_regexp_pretty("execAlloc", r"ExecutableAllocator\(\[" + reExecPool + r"\]\)")
 run_fragment("ExecutableAllocator.twopools")
 assert_regexp_pretty(
-"execAlloc", "ExecutableAllocator\(\[" + reExecPool + ", " + reExecPool + "\]\)"
+"execAlloc", r"ExecutableAllocator\(\[" + reExecPool + ", " + reExecPool + r"\]\)"
 )

@@ -160,7 +160,7 @@ do_test()
 #
 # The timeout command send a SIGTERM signal, which should return 143
 # (=128+15). However, due to a bug in tinybox, it returns 142.
-if test \( $rc -eq 143 -o $rc -eq 142 \) -a $attempt -lt {retry}; then
+if test \\( $rc -eq 143 -o $rc -eq 142 \\) -a $attempt -lt {retry}; then
 echo '\\n{tag}RETRY='$rc,$time
 attempt=$((attempt + 1))
 do_test $idx $attempt "$@"

@@ -18,7 +18,7 @@ def to_code_list(codes):
 def convert(dir):
-ver_pat = re.compile("NormalizationTest-([0-9\.]+)\.txt")
+ver_pat = re.compile(r"NormalizationTest-([0-9\.]+)\.txt")
 part_pat = re.compile("^@(Part([0-9]+) .+)$")
 test_pat = re.compile(
 "^([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);([0-9A-Fa-f ]+);$"

@@ -237,7 +237,7 @@ def mergeMeta(reftest, frontmatter, includes):
 if info:
 # Open some space in an existing info text
 if "info" in frontmatter:
-frontmatter["info"] += "\n\n \%s" % info
+frontmatter["info"] += "\n\n \\%s" % info
 else:
 frontmatter["info"] = info

@@ -1111,7 +1111,7 @@ def make_regexp_space_test(version, test_space_table, codepoint_table):
 test_space.write(",\n".join(map(hex_and_name, test_space_table)))
 test_space.write("\n);\n")
 test_space.write(
-"""
+r"""
 assertEq(/^\s+$/.exec(onlySpace) !== null, true);
 assertEq(/^[\s]+$/.exec(onlySpace) !== null, true);
 assertEq(/^[^\s]+$/.exec(onlySpace) === null, true);

@@ -252,7 +252,7 @@ class ReftestResolver(object):
 rv = [
 (
 os.path.join(dirname, default_manifest),
-r".*%s(?:[#?].*)?$" % pathname.replace("?", "\?"),
+r".*%s(?:[#?].*)?$" % pathname.replace("?", r"\?"),
 )
 ]

@@ -83,7 +83,7 @@ def substs(variables, values):
 # Safe substitute leaves unrecognized variables in place.
 # We replace them with the empty string.
-new_values.append(re.sub('\$\{\w+\}', '', new_value))
+new_values.append(re.sub(r'\$\{\w+\}', '', new_value))
 return new_values
@@ -240,7 +240,7 @@ def evaluate_boolean(variables, arguments):
 # If statements can have old-style variables which are not demarcated
 # like ${VARIABLE}. Attempt to look up the variable both ways.
 try:
-if re.search('\$\{\w+\}', argument):
+if re.search(r'\$\{\w+\}', argument):
 try:
 t = Template(argument)
 value = t.substitute(variables)

@@ -46,13 +46,13 @@ def toggle_beta_status(is_beta):
 check_files_exist()
 if (is_beta):
 print("adding Beta status to version numbers")
-sed_inplace('s/^\(#define *PR_VERSION *\"[0-9.]\+\)\" *$/\\1 Beta\"/', prinit_h)
+sed_inplace('s/^\\(#define *PR_VERSION *\"[0-9.]\\+\\)\" *$/\\1 Beta\"/', prinit_h)
-sed_inplace('s/^\(#define *PR_BETA *\)PR_FALSE *$/\\1PR_TRUE/', prinit_h)
+sed_inplace('s/^\\(#define *PR_BETA *\\)PR_FALSE *$/\\1PR_TRUE/', prinit_h)
 else:
 print("removing Beta status from version numbers")
-sed_inplace('s/^\(#define *PR_VERSION *\"[0-9.]\+\) *Beta\" *$/\\1\"/', prinit_h)
+sed_inplace('s/^\\(#define *PR_VERSION *\"[0-9.]\\+\\) *Beta\" *$/\\1\"/', prinit_h)
-sed_inplace('s/^\(#define *PR_BETA *\)PR_TRUE *$/\\1PR_FALSE/', prinit_h)
+sed_inplace('s/^\\(#define *PR_BETA *\\)PR_TRUE *$/\\1PR_FALSE/', prinit_h)
 print("please run 'hg stat' and 'hg diff' to verify the files have been verified correctly")
 def print_beta_versions():
@@ -81,22 +81,22 @@ def ensure_arguments_after_action(how_many, usage):
 exit_with_failure("incorrect number of arguments, expected parameters are:\n" + usage)
 def set_major_versions(major):
-sed_inplace('s/^\(#define *PR_VMAJOR *\).*$/\\1' + major + '/', prinit_h)
+sed_inplace('s/^\\(#define *PR_VMAJOR *\\).*$/\\1' + major + '/', prinit_h)
 sed_inplace('s/^MOD_MAJOR_VERSION=.*$/MOD_MAJOR_VERSION=' + major + '/', f_conf)
 sed_inplace('s/^MOD_MAJOR_VERSION=.*$/MOD_MAJOR_VERSION=' + major + '/', f_conf_in)
 def set_minor_versions(minor):
-sed_inplace('s/^\(#define *PR_VMINOR *\).*$/\\1' + minor + '/', prinit_h)
+sed_inplace('s/^\\(#define *PR_VMINOR *\\).*$/\\1' + minor + '/', prinit_h)
 sed_inplace('s/^MOD_MINOR_VERSION=.*$/MOD_MINOR_VERSION=' + minor + '/', f_conf)
 sed_inplace('s/^MOD_MINOR_VERSION=.*$/MOD_MINOR_VERSION=' + minor + '/', f_conf_in)
 def set_patch_versions(patch):
-sed_inplace('s/^\(#define *PR_VPATCH *\).*$/\\1' + patch + '/', prinit_h)
+sed_inplace('s/^\\(#define *PR_VPATCH *\\).*$/\\1' + patch + '/', prinit_h)
 sed_inplace('s/^MOD_PATCH_VERSION=.*$/MOD_PATCH_VERSION=' + patch + '/', f_conf)
 sed_inplace('s/^MOD_PATCH_VERSION=.*$/MOD_PATCH_VERSION=' + patch + '/', f_conf_in)
 def set_full_lib_versions(version):
-sed_inplace('s/^\(#define *PR_VERSION *\"\)\([0-9.]\+\)\(.*\)$/\\1' + version + '\\3/', prinit_h)
+sed_inplace('s/^\\(#define *PR_VERSION *\"\\)\\([0-9.]\\+\\)\\(.*\\)$/\\1' + version + '\\3/', prinit_h)
 def set_all_lib_versions(version, major, minor, patch):
 set_full_lib_versions(version)

@@ -10,7 +10,7 @@ class STRIP_LABEL(TransformPattern):
 # Used to remove `<label data-l10n-name="remove-search-engine-article">` from a string
 def visit_TextElement(self, node):
 node.value = re.sub(
-'\s?<label data-l10n-name="remove-search-engine-article">.+?</label>\s?',
+r'\s?<label data-l10n-name="remove-search-engine-article">.+?</label>\s?',
 "",
 node.value,
 )

@@ -375,7 +375,7 @@ def parse_chrome_manifest(path, base_path, chrome_entries):
 ###
 def get_version_maybe_buildid(app_version):
 def _extract_numeric_part(part):
-matches = re.compile("[^\d]").search(part)
+matches = re.compile(r"[^\d]").search(part)
 if matches:
 part = part[0 : matches.start()]
 if len(part) == 0:

@@ -171,14 +171,14 @@ class CppEclipseBackend(CommonBackend):
 # Here we generate the code formatter that will show up in the UI with
 # the name "Mozilla". The formatter is stored as a single line of XML
 # in the org.eclipse.cdt.ui.formatterprofiles pref.
-cdt_ui_prefs += """org.eclipse.cdt.ui.formatterprofiles=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?>\\n<profiles version\="1">\\n<profile kind\="CodeFormatterProfile" name\="Mozilla" version\="1">\\n"""
+cdt_ui_prefs += r'org.eclipse.cdt.ui.formatterprofiles=<?xml version\="1.0" encoding\="UTF-8" standalone\="no"?>\n<profiles version\="1">\n<profile kind\="CodeFormatterProfile" name\="Mozilla" version\="1">\n'
-XML_PREF_TEMPLATE = """<setting id\="@PREF_NAME@" value\="@PREF_VAL@"/>\\n"""
+XML_PREF_TEMPLATE = r'<setting id\="@PREF_NAME@" value\="@PREF_VAL@"/>\n'
 for line in FORMATTER_SETTINGS.splitlines():
 [pref, val] = line.split("=")
 cdt_ui_prefs += XML_PREF_TEMPLATE.replace("@PREF_NAME@", pref).replace(
 "@PREF_VAL@", val
 )
-cdt_ui_prefs += "</profile>\\n</profiles>\\n"
+cdt_ui_prefs += r"</profile>\n</profiles>\n"
 with open(cdt_ui_prefs_path, "w") as fh:
 fh.write(cdt_ui_prefs)

@@ -25,7 +25,7 @@ from mozbuild.frontend.data import (
 from .manifest_handler import ChromeManifestHandler
-_line_comment_re = re.compile('^//@line (\d+) "(.+)"$')
+_line_comment_re = re.compile(r'^//@line (\d+) "(.+)"$')
 def generate_pp_info(path, topsrcdir):

@@ -37,7 +37,7 @@ def get_range_length(range, debug_ranges):
 given offset."""
 length = 0
 for line in debug_ranges.splitlines():
-m = re.match("\s*([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s+([0-9a-fA-F]+)", line)
+m = re.match(r"\s*([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s+([0-9a-fA-F]+)", line)
 if m and int(m.group(1), 16) == range:
 length += 1
 return length

@@ -38,7 +38,7 @@ class DotProperties:
 line = l.strip()
 if not line or line.startswith("#"):
 continue
-(k, v) = re.split("\s*=\s*", line, 1)
+(k, v) = re.split(r"\s*=\s*", line, 1)
 self._properties[k] = v
 def get(self, key, default=None):

@@ -813,7 +813,7 @@ def _sign_msix_win(output, force, log, verbose):
 thumbprint.strip()
 for thumbprint in powershell(
 (
-"Get-ChildItem -Path Cert:\CurrentUser\My"
+r"Get-ChildItem -Path Cert:\CurrentUser\My"
 '| Where-Object {{$_.Subject -Match "{}"}}'
 '| Where-Object {{$_.FriendlyName -Match "{}"}}'
 "| Select-Object -ExpandProperty Thumbprint"
@@ -838,7 +838,7 @@ def _sign_msix_win(output, force, log, verbose):
 (
 'New-SelfSignedCertificate -Type Custom -Subject "{}" '
 '-KeyUsage DigitalSignature -FriendlyName "{}"'
-" -CertStoreLocation Cert:\CurrentUser\My"
+r" -CertStoreLocation Cert:\CurrentUser\My"
 ' -TextExtension @("2.5.29.37={{text}}1.3.6.1.5.5.7.3.3", '
 '"2.5.29.19={{text}}")'
 "| Select-Object -ExpandProperty Thumbprint"
@@ -856,7 +856,7 @@ def _sign_msix_win(output, force, log, verbose):
 )
 powershell(
-'Export-Certificate -Cert Cert:\CurrentUser\My\{} -FilePath "{}"'.format(
+r'Export-Certificate -Cert Cert:\CurrentUser\My\{} -FilePath "{}"'.format(
 thumbprint, crt_path
 )
 )
@@ -869,7 +869,7 @@ def _sign_msix_win(output, force, log, verbose):
 powershell(
 (
-'Export-PfxCertificate -Cert Cert:\CurrentUser\My\{} -FilePath "{}"'
+r'Export-PfxCertificate -Cert Cert:\CurrentUser\My\{} -FilePath "{}"'
 ' -Password (ConvertTo-SecureString -String "{}" -Force -AsPlainText)'
 ).format(thumbprint, pfx_path, password)
 )
@@ -940,7 +940,7 @@ def _sign_msix_win(output, force, log, verbose):
 root_thumbprints = [
 root_thumbprint.strip()
 for root_thumbprint in powershell(
-"Get-ChildItem -Path Cert:\LocalMachine\Root\{} "
+r"Get-ChildItem -Path Cert:\LocalMachine\Root\{} "
 "| Select-Object -ExpandProperty Thumbprint".format(thumbprint),
 check=False,
 ).splitlines()

@@ -22,16 +22,16 @@ class CompilerPreprocessor(Preprocessor):
 # For now, we don't look very hard for C strings because they don't matter
 # that much for our unit tests, but we at least avoid expanding in the
 # simple "FOO" case.
-VARSUBST = re.compile('(?<!")(?P<VAR>\w+)(?!")', re.U)
+VARSUBST = re.compile(r'(?<!")(?P<VAR>\w+)(?!")', re.U)
-NON_WHITESPACE = re.compile("\S")
+NON_WHITESPACE = re.compile(r"\S")
 HAS_FEATURE_OR_BUILTIN = re.compile(
-'(__has_(?:feature|builtin|attribute|warning))\("?([^"\)]*)"?\)'
+r'(__has_(?:feature|builtin|attribute|warning))\("?([^"\)]*)"?\)'
 )
 def __init__(self, *args, **kwargs):
 Preprocessor.__init__(self, *args, **kwargs)
 self.do_filter("c_substitution")
-self.setMarker("#\s*")
+self.setMarker(r"#\s*")
 def do_if(self, expression, **kwargs):
 # The C preprocessor handles numbers following C rules, which is a

@@ -822,7 +822,7 @@ def edit_moz_build_file_to_remove_file(
 """
 simple_file_line = re.compile(
-"^\s*['\"]" + unnormalized_filename_to_remove + "['\"],*$"
+"^\\s*['\"]" + unnormalized_filename_to_remove + "['\"],*$"
 )
 did_replace = False

@@ -612,7 +612,7 @@ class VendorManifest(MozbuildObject):
 if r[0] in l:
 print("Found " + l)
 replaced += 1
-yaml[i] = re.sub(r[0] + " [v\.a-f0-9]+.*$", r[0] + r[1], yaml[i])
+yaml[i] = re.sub(r[0] + r" [v\.a-f0-9]+.*$", r[0] + r[1], yaml[i])
 assert len(replacements) == replaced

@@ -172,7 +172,7 @@ def parseRepackConfig(file: Path, platform: str):
 config[key] = value
 continue
 if key == "deb_section":
-config["deb_section"] = re.sub("/", "\/", value)
+config["deb_section"] = re.sub("/", r"\/", value)
 continue
 if isValidPlatform(key):
 ftp_platform = getFtpPlatform(key)

@@ -18,7 +18,7 @@ import build
 # Matches lines like `GK_ATOM(foo, "foo", 0x12345678, true, nsStaticAtom, PseudoElementAtom)`.
 PATTERN = re.compile(
-'^GK_ATOM\(([^,]*),[^"]*"([^"]*)",\s*(0x[0-9a-f]+),\s*[^,]*,\s*([^,]*),\s*([^)]*)\)',
+r'^GK_ATOM\(([^,]*),[^"]*"([^"]*)",\s*(0x[0-9a-f]+),\s*[^,]*,\s*([^,]*),\s*([^)]*)\)',
 re.MULTILINE,
 )
 FILE = "include/nsGkAtomList.h"

@@ -112,12 +112,12 @@ def collect_jsm(files):
 # js files with EXPORTED_SYMBOLS
 if mode == "hg":
-cmd = ["hg", "files", "set:grep('EXPORTED_SYMBOLS = \[') and glob:**/*.js"]
+cmd = ["hg", "files", r"set:grep('EXPORTED_SYMBOLS = \[') and glob:**/*.js"]
 for line in run(cmd):
 put_file(files, kind, pathlib.Path(line))
 else:
 handled = {}
-cmd = ["git", "grep", "EXPORTED_SYMBOLS = \[", "*.js"]
+cmd = ["git", "grep", r"EXPORTED_SYMBOLS = \[", "*.js"]
 for line in run(cmd):
 m = re.search("^([^:]+):", line)
 if not m:

@@ -18,7 +18,7 @@ def read(*parts):
 def get_version():
 return re.findall(
-'__version__ = "([\d\.]+)"', read("firefox_ui_harness", "__init__.py"), re.M
+r'__version__ = "([\d\.]+)"', read("firefox_ui_harness", "__init__.py"), re.M
 )[0]

@@ -157,7 +157,7 @@ class Ares6(Benchmark):
 self.scores = defaultdict(lambda: defaultdict(list))
 def _try_find_score(self, score_name, line):
-m = re.search(score_name + ":\s*(\d+\.?\d*?) (\+-)?.+", line)
+m = re.search(score_name + r":\s*(\d+\.?\d*?) (\+-)?.+", line)
 if not m:
 return False
@@ -168,7 +168,7 @@ class Ares6(Benchmark):
 def process_line(self, proc, line):
 line = line.strip("\n")
 print(line)
-m = re.search("Running... (.+) \(.+\)", line)
+m = re.search(r"Running... (.+) \(.+\)", line)
 if m:
 self.bench_name = m.group(1)
 return
@@ -182,7 +182,7 @@ class Ares6(Benchmark):
 if self._try_find_score("steadyState", line):
 return
-m = re.search("summary:\s*(\d+\.?\d*?) (\+-)?.+", line)
+m = re.search(r"summary:\s*(\d+\.?\d*?) (\+-)?.+", line)
 if m:
 self.last_summary = float(m.group(1))
@@ -218,7 +218,7 @@ class SixSpeed(RunOnceBenchmark):
 def process_line(self, proc, output):
 output = output.strip("\n")
 print(output)
-m = re.search("(.+): (\d+)", output)
+m = re.search(r"(.+): (\d+)", output)
 if not m:
 return
 subtest = m.group(1)
@@ -250,7 +250,7 @@ class SunSpider(RunOnceBenchmark):
 def process_line(self, proc, output):
 output = output.strip("\n")
 print(output)
-m = re.search("(.+): (\d+)", output)
+m = re.search(r"(.+): (\d+)", output)
 if not m:
 return
 subtest = m.group(1)
@@ -339,7 +339,7 @@ class Octane(RunOnceBenchmark):
 def process_line(self, proc, output):
 output = output.strip("\n")
 print(output)
-m = re.search("(.+): (\d+)", output)
+m = re.search(r"(.+): (\d+)", output)
 if not m:
 return
 subtest = m.group(1)

@@ -17,7 +17,7 @@ def read(*parts):
 def get_version():
 return re.findall(
-'__version__ = "([\d\.]+)"', read("marionette_driver", "__init__.py"), re.M
+r'__version__ = "([\d\.]+)"', read("marionette_driver", "__init__.py"), re.M
 )[0]

@@ -730,7 +730,7 @@ class BaseMarionetteTestRunner(object):
 @property
 def filename_pattern(self):
 if self._filename_pattern is None:
-self._filename_pattern = re.compile("^test(((_.+?)+?\.((py))))$")
+self._filename_pattern = re.compile(r"^test(((_.+?)+?\.((py))))$")
 return self._filename_pattern

@@ -47,6 +47,6 @@ class TestPageSource(MarionetteTestCase):
 import re
 self.assertEqual(
-re.sub("\s", "", source), "<xml><foo><bar>baz</bar></foo></xml>"
+re.sub(r"\s", "", source), "<xml><foo><bar>baz</bar></foo></xml>"
 )
 self.assertEqual(source, from_web_api)

@@ -17,7 +17,7 @@ def read(*parts):
 def get_version():
 return re.findall(
-'__version__ = "([\d\.]+)"', read("marionette_harness", "__init__.py"), re.M
+r'__version__ = "([\d\.]+)"', read("marionette_harness", "__init__.py"), re.M
 )[0]

@@ -8,8 +8,8 @@
 import re
 from operator import itemgetter
-RE_DOCSHELL = re.compile("I\/DocShellAndDOMWindowLeak ([+\-]{2})DOCSHELL")
+RE_DOCSHELL = re.compile(r"I\/DocShellAndDOMWindowLeak ([+\-]{2})DOCSHELL")
-RE_DOMWINDOW = re.compile("I\/DocShellAndDOMWindowLeak ([+\-]{2})DOMWINDOW")
+RE_DOMWINDOW = re.compile(r"I\/DocShellAndDOMWindowLeak ([+\-]{2})DOMWINDOW")
 class ShutdownLeaks(object):
@@ -233,7 +233,7 @@ class ShutdownLeaks(object):
 self.hiddenDocShellsCount += 1
 def _parseValue(self, line, name):
-match = re.search("\[%s = (.+?)\]" % name, line)
+match = re.search(r"\[%s = (.+?)\]" % name, line)
 if match:
 return match.group(1)
 return None
@@ -329,17 +329,17 @@ class LSANLeaks(object):
 )
 self.startRegExp = re.compile(
-"==\d+==ERROR: LeakSanitizer: detected memory leaks"
+r"==\d+==ERROR: LeakSanitizer: detected memory leaks"
 )
 self.fatalErrorRegExp = re.compile(
-"==\d+==LeakSanitizer has encountered a fatal error."
+r"==\d+==LeakSanitizer has encountered a fatal error."
 )
 self.symbolizerOomRegExp = re.compile(
 "LLVMSymbolizer: error reading file: Cannot allocate memory"
 )
-self.stackFrameRegExp = re.compile(" #\d+ 0x[0-9a-f]+ in ([^(</]+)")
+self.stackFrameRegExp = re.compile(r" #\d+ 0x[0-9a-f]+ in ([^(</]+)")
 self.sysLibStackFrameRegExp = re.compile(
-" #\d+ 0x[0-9a-f]+ \(([^+]+)\+0x[0-9a-f]+\)"
+r" #\d+ 0x[0-9a-f]+ \(([^+]+)\+0x[0-9a-f]+\)"
 )
 def log(self, line, path=""):

@@ -11,7 +11,7 @@ LOG = get_proxy_logger("profiler")
 # Precompiled regex for validating lib names
 # Empty lib name means client couldn't associate frame with any lib
-gLibNameRE = re.compile("[0-9a-zA-Z_+\-\.]*$")
+gLibNameRE = re.compile(r"[0-9a-zA-Z_+\-\.]*$")
 # Maximum number of times a request can be forwarded to a different server
 # for symbolication. Also prevents loops.

@@ -85,7 +85,7 @@ class TestViewGeckoProfile(unittest.TestCase):
 "https://profiler.firefox.com/from-url/"
 "http%3A%2F%2F127.0.0.1%3A{PORT}%2Ffakeprofile.json"
 )
-actual_url = re.sub("%3A\d+%2F", "%3A{PORT}%2F", self.firefox_profiler_url)
+actual_url = re.sub(r"%3A\d+%2F", "%3A{PORT}%2F", self.firefox_profiler_url)
 self.assertEqual(
 actual_url,

@@ -45,7 +45,7 @@ def test_filelist(httpd, docroot, path):
 filelist = os.listdir(docroot)
-pattern = "\<[a-zA-Z0-9\-\_\.\=\"'\/\\\%\!\@\#\$\^\&\*\(\) :;]*\>"
+pattern = r"""\<[a-zA-Z0-9\-\_\.\="'\/\\%\!\@\#\$\^\&\*\(\) :;]*\>"""
 for line in f.readlines():
 subbed_lined = re.sub(pattern, "", ensure_str(line).strip("\n"))

@@ -60,20 +60,20 @@ class LSANLeaks(object):
 )
 self.startRegExp = re.compile(
-"==\d+==ERROR: LeakSanitizer: detected memory leaks"
+r"==\d+==ERROR: LeakSanitizer: detected memory leaks"
 )
 self.fatalErrorRegExp = re.compile(
-"==\d+==LeakSanitizer has encountered a fatal error."
+r"==\d+==LeakSanitizer has encountered a fatal error."
 )
 self.symbolizerOomRegExp = re.compile(
 "LLVMSymbolizer: error reading file: Cannot allocate memory"
 )
-self.stackFrameRegExp = re.compile(" #\d+ 0x[0-9a-f]+ in ([^(</]+)")
+self.stackFrameRegExp = re.compile(r" #\d+ 0x[0-9a-f]+ in ([^(</]+)")
 self.sysLibStackFrameRegExp = re.compile(
-" #\d+ 0x[0-9a-f]+ \(([^+]+)\+0x[0-9a-f]+\)"
+r" #\d+ 0x[0-9a-f]+ \(([^+]+)\+0x[0-9a-f]+\)"
 )
 self.summaryRegexp = re.compile(
-"SUMMARY: AddressSanitizer: (\d+) byte\(s\) leaked in (\d+) allocation\(s\)."
+r"SUMMARY: AddressSanitizer: (\d+) byte\(s\) leaked in (\d+) allocation\(s\)."
 )
 self.rustRegexp = re.compile("::h[a-f0-9]{16}$")
 self.setAllowed(allowed)

@@ -21,8 +21,8 @@ def ip_addresses():
 # Regex to match IPv4 addresses.
 # 0-255.0-255.0-255.0-255, note order is important here.
 regexip = re.compile(
-"((25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)\.){3}"
+r"((25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)\.){3}"
-"(25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)"
+r"(25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|\d)"
 )
 commands = (

@@ -260,7 +260,7 @@ def _maybe_update_host_utils(build_obj):
 # Compare, prompt, update
 if existing_version and manifest_version:
-hu_version_regex = "host-utils-([\d\.]*)"
+hu_version_regex = r"host-utils-([\d\.]*)"
 manifest_version = float(re.search(hu_version_regex, manifest_version).group(1))
 existing_version = float(re.search(hu_version_regex, existing_version).group(1))
 if existing_version < manifest_version:

@@ -17,8 +17,8 @@ DESKTOP_VISUALFX_THEME = {
 "Custom": 3,
 }.get("Best appearance")
 TASKBAR_AUTOHIDE_REG_PATH = {
-"Windows 7": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects2",
+"Windows 7": r"HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects2",
-"Windows 10": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3",
+"Windows 10": r"HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3",
 }.get("{} {}".format(platform.system(), platform.release()))
 #####
@@ -99,7 +99,7 @@ config = {
 "cmd": [
 "powershell",
 "-command",
-"\"&{{&Set-ItemProperty -Path 'HKCU:Software\Microsoft\Windows\CurrentVersion\Explorer\VisualEffects' -Name VisualFXSetting -Value {}}}\"".format(
+"\"&{{&Set-ItemProperty -Path 'HKCU:Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\VisualEffects' -Name VisualFXSetting -Value {}}}\"".format(
 DESKTOP_VISUALFX_THEME
 ),
 ],
@@ -112,7 +112,7 @@ config = {
 "cmd": [
 "powershell",
 "-command",
-"New-ItemProperty -Path 'HKCU:\Control Panel\Accessibility' -Name 'DynamicScrollbars' -Value 0",
+r"New-ItemProperty -Path 'HKCU:\Control Panel\Accessibility' -Name 'DynamicScrollbars' -Value 0",
 ],
 "architectures": ["32bit", "64bit"],
 "halt_on_failure": False,

@@ -20,8 +20,8 @@ DESKTOP_VISUALFX_THEME = {
 "Custom": 3,
 }.get("Best appearance")
 TASKBAR_AUTOHIDE_REG_PATH = {
-"Windows 7": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects2",
+"Windows 7": r"HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects2",
-"Windows 10": "HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3",
+"Windows 10": r"HKCU:SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3",
 }.get("{} {}".format(platform.system(), platform.release()))
 #####
@@ -85,7 +85,7 @@ config = {
 "cmd": [
 "powershell",
 "-command",
-"\"&{{&Set-ItemProperty -Path 'HKCU:Software\Microsoft\Windows\CurrentVersion\Explorer\VisualEffects' -Name VisualFXSetting -Value {}}}\"".format(
+"\"&{{&Set-ItemProperty -Path 'HKCU:Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\VisualEffects' -Name VisualFXSetting -Value {}}}\"".format(
 DESKTOP_VISUALFX_THEME
 ),
 ],
@@ -98,7 +98,7 @@ config = {
 "cmd": [
 "powershell",
 "-command",
-"New-ItemProperty -Path 'HKCU:\Control Panel\Accessibility' -Name 'DynamicScrollbars' -Value 0",
+r"New-ItemProperty -Path 'HKCU:\Control Panel\Accessibility' -Name 'DynamicScrollbars' -Value 0",
 ],
 "architectures": ["32bit", "64bit"],
 "halt_on_failure": False,

@@ -94,7 +94,7 @@ class ActionsConfigExample(BaseScript):
 def _ship1(self):
 self.info(
-"""
+r"""
 _~
 _~ )_)_~
 )_))_))_)
@@ -106,7 +106,7 @@ class ActionsConfigExample(BaseScript):
 def _ship2(self):
 self.info(
-"""
+r"""
 _4 _4
 _)_))_)
 _)_)_)_)

@@ -407,7 +407,7 @@ def _docheckout(
 ui.warn(b"(shared store does not exist; deleting destination)\n")
 with timeit("removed_missing_shared_store", "remove-wdir"):
 destvfs.rmtree(forcibly=True)
-elif not re.search(b"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
+elif not re.search(rb"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
 ui.warn(
 b"(shared store does not belong to pooled storage; "
 b"deleting destination to improve efficiency)\n"

@@ -56,7 +56,7 @@ def _autoconfig_path(fx_install_dir):
 def _cfg_file_path(fx_install_dir):
-"""
+r"""
 Windows: defaults\pref
 Mac: Firefox.app/Contents/Resources/defaults/pref
 Linux: defaults/pref

@@ -363,7 +363,7 @@ class OpenH264Build(TransferMixin, VCSScript, TooltoolMixin):
 to_package = []
 for f in glob.glob(os.path.join(srcdir, "*gmpopenh264*")):
 if not re.search(
-"(?:lib)?gmpopenh264(?!\.\d)\.(?:dylib|so|dll|info)(?!\.\d)", f
+r"(?:lib)?gmpopenh264(?!\.\d)\.(?:dylib|so|dll|info)(?!\.\d)", f
 ):
 # Don't package unnecessary zip bloat
 # Blocks things like libgmpopenh264.2.dylib and libgmpopenh264.so.1

@@ -85,7 +85,7 @@ class Allowlist:
 if len(parts) >= 2:
 filename = "%s%s" % (parts[0], new_name)
-return filename.strip("/\\\ \t")
+return filename.strip("/\\\\ \t")
 def check(self, test, file_name_index, event_source_index=None):
 errors = {}

@@ -311,7 +311,7 @@ class BrowserLogResults(object):
 # regular expression for responsiveness results
 RESULTS_RESPONSIVENESS_REGEX = re.compile(
-"MOZ_EVENT_TRACE\ssample\s\d*?\s(\d*\.?\d*)$", re.DOTALL | re.MULTILINE
+r"MOZ_EVENT_TRACE\ssample\s\d*?\s(\d*\.?\d*)$", re.DOTALL | re.MULTILINE
 )
 # classes for results types

@@ -221,7 +221,7 @@ def trackProcess(row, firstFirefoxPID):
 parentPID = int(row[PARENT_PID_INDEX])
 if parentPID == firstFirefoxPID:
 proc = row[PROCESS_INDEX]
-gBrowserPID = int(re.search("^.* \(\s*(\d+)\)$", proc).group(1))
+gBrowserPID = int(re.search(r"^.* \(\s*(\d+)\)$", proc).group(1))
 def getBrowserPID():
@@ -232,7 +232,7 @@ def getBrowserPID():
 def trackThread(row, browserPID):
 event, proc, tid = row[EVENTNAME_INDEX], row[PROCESS_INDEX], row[THREAD_ID_INDEX]
 if event in ["T-DCStart", "T-Start"]:
-procName, procID = re.search("^(.*) \(\s*(\d+)\)$", proc).group(1, 2)
+procName, procID = re.search(r"^(.*) \(\s*(\d+)\)$", proc).group(1, 2)
 if procID == str(browserPID):
 imgIdx = getIndex(event, IMAGEFUNC_COL)
 img = re.match("([^!]+)!", row[imgIdx]).group(1)
@@ -267,7 +267,7 @@ def trackThreadNetIO(row, io, stage):
 gConnectionIDs[connID] = tid
 origThread = gConnectionIDs[connID]
 if origThread in gThreads:
-match = re.match("[\w-]+\/([\w-]+)?", event)
+match = re.match(r"[\w-]+\/([\w-]+)?", event)
 if not match:
 raise xtalos.XTalosError(
 "Could not find a regular expression match for event: {}".format(event)

@@ -671,7 +671,7 @@ class SessionStoreWindowRestored(ClassicEvent):
 class ProcessStart(XPerfEvent):
 cmd_line_index = None
 process_index = None
-extractor = re.compile("^(.+) \(\s*(\d+)\)$")
+extractor = re.compile(r"^(.+) \(\s*(\d+)\)$")
 def __init__(self, leafname):
 super(ProcessStart, self).__init__("P-Start")
@@ -746,7 +746,7 @@ class ThreadStart(XPerfEvent):
 process_index = None
 tid_index = None
-pid_extractor = re.compile("^.+ \(\s*(\d+)\)$")
+pid_extractor = re.compile(r"^.+ \(\s*(\d+)\)$")
 def __init__(self):
 super(ThreadStart, self).__init__("T-Start")

@@ -125,7 +125,7 @@ def main():
 else:
 if sys.platform == "win32":
 # replace msys-style paths with proper Windows paths
-m = re.match("^\/\w\/", extensionDir)
+m = re.match(r"^\/\w\/", extensionDir)
 if m:
 extensionDir = "%s:/%s" % (m.group(0)[1:2], extensionDir[3:])
 extensionDir = extensionDir.replace("/", "\\")

@@ -378,8 +378,8 @@ def add_test_data(logger, wpt_meta, dir_path, test, subtest, test_data):
 meta.set(test, subtest, product="firefox", bug_url=bug_link)
-bugzilla_re = re.compile("https://bugzilla\.mozilla\.org/show_bug\.cgi\?id=\d+")
+bugzilla_re = re.compile(r"https://bugzilla\.mozilla\.org/show_bug\.cgi\?id=\d+")
-bug_re = re.compile("(?:[Bb][Uu][Gg])?\s*(\d+)")
+bug_re = re.compile(r"(?:[Bb][Uu][Gg])?\s*(\d+)")
 def get_bug_link(value):

@@ -234,7 +234,7 @@ class Browser:
 class Geckodriver:
-PORT_RE = re.compile(b".*Listening on [^ :]*:(\d+)")
+PORT_RE = re.compile(rb".*Listening on [^ :]*:(\d+)")
 def __init__(self, configuration, hostname=None, extra_args=None):
 self.config = configuration["webdriver"]

@@ -38,7 +38,7 @@ for curdir, subdirList, fileList in os.walk(DEFDIR, topdown=True):
 except ValueError as e:
 print("parse of " + theFile + " failed: " + e[0])
 else:
-theFile = re.sub("\.\./", "", theFile)
+theFile = re.sub(r"\.\./", "", theFile)
 defList.append(theFile)
 if (len(defList)):
@@ -75,7 +75,7 @@ for curdir, subdirList, fileList in os.walk(TESTTREE, topdown=True):
 templateFile = autoTemplate
 suffix = ".html"
-rfile = re.sub("\.\./", "", file)
+rfile = re.sub(r"\.\./", "", file)
 # interesting pattern is {{TESTFILE}}
 tcopy = re.sub("{{TESTFILE}}", rfile, templateFile)
@@ -88,7 +88,7 @@ for curdir, subdirList, fileList in os.walk(TESTTREE, topdown=True):
 tcopy = re.sub("{{TESTTITLE}}", title, tcopy)
 # target file is basename of theFile + '-manual.html'
-target = re.sub("\.test",suffix, theFile)
+target = re.sub(r"\.test",suffix, theFile)
 try:
 out = open(target, "w")

@@ -680,7 +680,7 @@ def generate_tests(testcase, tentative):
 html_testcase_markup = template_testcase_markup.format(url_wptserve_sub)
 html_nonspeculative_testcase_markup = template_nonspeculative_testcase_markup.format(url_wptserve_sub)
-js_testcase_markup = template_testcase_markup.format(url_js_sub).replace(u"</script>", u"<\/script>").replace(u"<meta charset", u"<meta\ charset")
+js_testcase_markup = template_testcase_markup.format(url_js_sub).replace(u"</script>", u"<\\/script>").replace(u"<meta charset", u"<meta\\ charset")
 if test_nonspeculative == u'true':
 nonspeculative = template_nonspeculative.format(preamble=preamble, encoding_decl=encoding_decl, title=title, nonspeculative_testcase_markup=html_nonspeculative_testcase_markup, delay=delay)

@@ -51,7 +51,7 @@ class ReplaceRequirements(object):
 with open(path) as f:
 parser.read_file(f)
 deps = parser.get("testenv", "deps")
-dep_re = re.compile("(?:.*:\s*)?-r(.*)")
+dep_re = re.compile(r"(?:.*:\s*)?-r(.*)")
 # This can break if we start using more features of tox
 for dep in deps.splitlines():

@@ -157,13 +157,13 @@ class GeckoCommitMessage(CommitMessage):
 # than just enforce a general pattern.
 _bug_re = re.compile(
-"^Bug (\d+)[^\w]*(?:Part \d+[^\w]*)?(.*?)\s*(?:r=(\w*))?$", re.IGNORECASE
+r"^Bug (\d+)[^\w]*(?:Part \d+[^\w]*)?(.*?)\s*(?:r=(\w*))?$", re.IGNORECASE
 )
 _backout_re = re.compile(
-"^(?:Back(?:ing|ed)\s+out)|Backout|(?:Revert|(?:ed|ing))", re.IGNORECASE
+r"^(?:Back(?:ing|ed)\s+out)|Backout|(?:Revert|(?:ed|ing))", re.IGNORECASE
 )
-_backout_sha1_re = re.compile("(?:\s|\:)(0-9a-f){12}")
+_backout_sha1_re = re.compile(r"(?:\s|\:)(0-9a-f){12}")
 def _parse_message(self):
 CommitMessage._parse_message(self)

@@ -174,7 +174,7 @@ class LoadCommits(Step):
 )
 update_regexp = re.compile(
-"Bug \d+ - Update web-platform-tests to revision [0-9a-f]{40}"
+r"Bug \d+ - Update web-platform-tests to revision [0-9a-f]{40}"
 )
 state.has_backouts = False

@@ -125,7 +125,7 @@ def config_file(request):
 @pytest.fixture
 def bug_number(request):
-return re.findall("\d+", str(request.fspath.basename))[0]
+return re.findall(r"\d+", str(request.fspath.basename))[0]
 @pytest.fixture

@@ -112,7 +112,7 @@ add_test(function test_loop () {
 });
 """
-PASSING_TEST_UNICODE = b"""
+PASSING_TEST_UNICODE = rb"""
 function run_test () { run_next_test(); }
 add_test(function test_unicode_print () {
@@ -659,8 +659,8 @@ prefs = [
 self.assertInLog("###!!! ASSERTION")
 log_lines = self.log.getvalue().splitlines()
-line_pat = "#\d\d:"
+line_pat = r"#\d\d:"
-unknown_pat = "#\d\d\: \?\?\?\[.* \+0x[a-f0-9]+\]"
+unknown_pat = r"#\d\d\: \?\?\?\[.* \+0x[a-f0-9]+\]"
 self.assertFalse(
 any(re.search(unknown_pat, line) for line in log_lines),
 "An stack frame without symbols was found in\n%s"

@@ -578,7 +578,7 @@ class Dumper:
 # MODULE os cpu guid debug_file
 (guid, debug_file) = (module_line.split())[3:5]
 # strip off .pdb extensions, and append .sym
-sym_file = re.sub("\.pdb$", "", debug_file) + ".sym"
+sym_file = re.sub(r"\.pdb$", "", debug_file) + ".sym"
 # we do want forward slashes here
 rel_path = os.path.join(debug_file, guid, sym_file).replace("\\", "/")
 full_path = os.path.normpath(os.path.join(self.symbol_path, rel_path))

@@ -18,7 +18,7 @@ class MinGWCapitalization(LineType):
 super(MinGWCapitalization, self).__init__(*args, **kwargs)
 with open(HEADERS_FILE, "r") as fh:
 self.headers = fh.read().strip().splitlines()
-self.regex = re.compile("^#include\s*<(" + "|".join(self.headers) + ")>")
+self.regex = re.compile(r"^#include\s*<(" + "|".join(self.headers) + ")>")
 def condition(self, payload, line, config):
 if not line.startswith("#include"):

@@ -265,8 +265,8 @@ class RaptorGatherer(FrameworkGatherer):
 sub_title = key.replace("_", " ")
 if key == "test_url":
 if "<" in description[key] or ">" in description[key]:
-description[key] = description[key].replace("<", "\<")
+description[key] = description[key].replace("<", r"\<")
-description[key] = description[key].replace(">", "\>")
+description[key] = description[key].replace(">", r"\>")
 result += f" * **{sub_title}**: `<{description[key]}>`__\n"
 elif key == "secondary_url":
 result += f" * **{sub_title}**: `<{description[key]}>`__\n"

@@ -101,7 +101,7 @@ class Verifier(object):
 ):
 """Determine if a target name (from a YAML) matches with a test."""
 tb = os.path.basename(target_test_name)
-tb = re.sub("\..*", "", tb)
+tb = re.sub(r"\..*", "", tb)
 if test_name == tb:
 # Found an exact match for the test_name
 return True
@@ -169,7 +169,7 @@ class Verifier(object):
 for test_name, test_info in test_list.items():
 manifest_path = test_info.get("path", test_info.get("manifest", ""))
 tb = os.path.basename(manifest_path)
-tb = re.sub("\..*", "", tb)
+tb = re.sub(r"\..*", "", tb)
 if (
 stests.get(tb, None) is not None
 or stests.get(test_name, None) is not None

@@ -15,7 +15,9 @@ from mozlint import result
 from mozlint.pathutils import get_ancestors_by_name
 from mozlint.util.implementation import LintProcess
-YAMLLINT_FORMAT_REGEX = re.compile("(.*):(.*):(.*): \[(error|warning)\] (.*) \((.*)\)$")
+YAMLLINT_FORMAT_REGEX = re.compile(
+r"(.*):(.*):(.*): \[(error|warning)\] (.*) \((.*)\)$"
+)
 results = []

@@ -16,7 +16,7 @@ from subprocess import PIPE, Popen
 # Matches lines produced by MozFormatCodeAddress(), e.g.
 # `#01: ???[tests/example +0x43a0]`.
-line_re = re.compile("#\d+: .+\[.+ \+0x[0-9A-Fa-f]+\]")
+line_re = re.compile(r"#\d+: .+\[.+ \+0x[0-9A-Fa-f]+\]")
 fix_stacks = None