Bug 1786490 - reformat the tree using black 23.3.0 r=sylvestre,webdriver-reviewers,taskgraph-reviewers,whimboo,jmaher,ahal,perftest-reviewers,geckoview-reviewers,afinder,m_kato

# ignore-this-changeset

Differential Revision: https://phabricator.services.mozilla.com/D186092
Logan Rosen 2023-09-06 16:14:30 +00:00
parent 51f5fcdd75
commit f024d94b2b
290 changed files with 777 additions and 840 deletions
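Nearly every hunk below is one of a handful of mechanical rewrites from black's stable style, visible throughout this patch: redundant parentheses around tuple targets in for loops are dropped, the ** power operator is written without surrounding spaces when both operands are simple, redundant u"" string prefixes are removed, and blank lines around comments and decorators are normalized. A minimal runnable sketch of these recurring patterns (the variable names are invented for illustration; this is not code from the patch itself):

mapping = {"a": 1, "b": 2}

# black drops the redundant parentheses around tuple targets:
#   for (key, value) in mapping.items()  ->  for key, value in mapping.items()
for key, value in mapping.items():
    print(key, value)

# the power operator "hugs" simple operands:
#   limit = 2 ** 32  ->  limit = 2**32
limit = 2**32

# redundant u"" prefixes are dropped; Python 3 strings are already Unicode:
#   name = u"\u2325"  ->  name = "\u2325"
name = "\u2325"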

View file

@@ -14,7 +14,6 @@ if sys.version_info[0] < 3:
class MetaPathFinder(object):
pass
else:
from importlib.abc import MetaPathFinder

View file

@@ -58,6 +58,7 @@ set_config("MOZ_REPLACE_MALLOC_STATIC", replace_malloc_static)
# PHC (Probabilistic Heap Checker)
# ==============================================================
# In general, it only makes sense for PHC to run on the platforms that have a
# crash reporter.
@depends(

View file

@@ -83,6 +83,7 @@ def pkgconf_requires_private(system_nspr, nspr_minver):
set_config("PKGCONF_REQUIRES_PRIVATE", pkgconf_requires_private)
# pkg_check_modules takes care of NSPR_CFLAGS and NSPR_LIBS when using --with-system-nspr.
@depends(build_environment, c_compiler, fold_libs, when=build_nspr)
def nspr_config(build_env, c_compiler, fold_libs):

View file

@@ -1033,6 +1033,7 @@ def sysroot(host_or_target, target_sysroot=None):
opt = "--with-host-sysroot"
env = "HOST_SYSROOT"
when = depends(host)(lambda h: h.kernel == "Linux")
# Only bootstrap a host sysroot when using a bootstrapped target sysroot
# or when the target doesn't use a bootstrapped sysroot in the first place.
@depends(when, bootstrap_target_when, target_sysroot.bootstrapped)
@@ -1731,7 +1732,6 @@ def select_linker_tmpl(host_or_target):
@imports("os")
@imports("shutil")
def select_linker(linker, c_compiler, developer_options, toolchain_flags, target):
if linker:
linker = linker[0]
else:
@@ -2500,6 +2500,7 @@ def cet_ldflags(c_compiler, target):
set_config("MOZ_CETCOMPAT_LDFLAGS", cet_ldflags)
# Frame pointers
# ==============================================================
@depends(c_compiler)
@@ -2785,6 +2786,7 @@ add_old_configure_assignment(
"ENABLE_MOZSEARCH_PLUGIN", depends_if("--enable-mozsearch-plugin")(lambda _: True)
)
# Libstdc++ compatibility hacks
# ==============================================================
#
@@ -2966,6 +2968,7 @@ add_old_configure_assignment("LIBFUZZER_FLAGS", libfuzzer_flags.use_flags)
# Shared library building
# ==============================================================
# XXX: The use of makefile constructs in these variables is awful.
@depends(target, c_compiler)
def make_shared_library(target, compiler):

View file

@@ -4,6 +4,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Updater
# ==============================================================
@depends(build_project)

View file

@@ -60,7 +60,6 @@ def scan_directory(path):
sys.exit(1)
for file in path.rglob("*"):
if not file.is_file():
continue

View file

@@ -208,7 +208,7 @@ def main():
if f in ignored_files and (f, 2) in functions:
fail(f"There should be only one {f} file")
-for (filename, n) in sorted(functions):
+for filename, n in sorted(functions):
for fn in functions[(filename, n)]:
# An allocation is present in a non-special file. Fail!
fail("'" + fn + "' present in " + filename)

View file

@@ -219,7 +219,7 @@ class _MockOpen(_MockBaseOpen):
content = six.ensure_binary(content or b"")
return MockedBytesFile(self, name, content)
else:
-content = six.ensure_text(content or u"")
+content = six.ensure_text(content or "")
return MockedStringFile(self, name, content)

View file

@@ -146,9 +146,9 @@ class TestNsinstall(unittest.TestCase):
@unittest.skipIf(not RUN_NON_ASCII_TESTS, "Skipping non ascii tests")
def test_nsinstall_non_ascii(self):
"Test that nsinstall handles non-ASCII files"
-filename = u"\u2325\u3452\u2415\u5081"
+filename = "\u2325\u3452\u2415\u5081"
testfile = self.touch(filename)
-testdir = self.mkdirs(u"\u4241\u1D04\u1414")
+testdir = self.mkdirs("\u4241\u1D04\u1414")
self.assertEqual(
nsinstall([testfile.encode("utf-8"), testdir.encode("utf-8")]), 0
)
@@ -162,9 +162,9 @@
)
def test_nsinstall_non_ascii_subprocess(self):
"Test that nsinstall as a subprocess handles non-ASCII files"
-filename = u"\u2325\u3452\u2415\u5081"
+filename = "\u2325\u3452\u2415\u5081"
testfile = self.touch(filename)
-testdir = self.mkdirs(u"\u4241\u1D04\u1414")
+testdir = self.mkdirs("\u4241\u1D04\u1414")
# We don't use subprocess because it can't handle Unicode on
# Windows <http://bugs.python.org/issue1759845>. mozprocess calls
# CreateProcessW directly so it's perfect.

View file

@@ -4,6 +4,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Put the content of `filenames[0]` file into `output` file pointer
def main(output, *filenames):
with open(filenames[0], "r", encoding="utf-8") as f:

View file

@@ -381,7 +381,7 @@ def fill(template, **args):
t, argModList = compile_fill_template(template)
# Now apply argModList to args
-for (name, modified_name, depth) in argModList:
+for name, modified_name, depth in argModList:
if not (args[name] == "" or args[name].endswith("\n")):
raise ValueError(
"Argument %s with value %r is missing a newline" % (name, args[name])
@@ -4372,7 +4372,7 @@ def InitUnforgeablePropertiesOnHolder(
(defineUnforgeableAttrs, properties.unforgeableAttrs),
(defineUnforgeableMethods, properties.unforgeableMethods),
]
-for (template, array) in unforgeableMembers:
+for template, array in unforgeableMembers:
if array.hasNonChromeOnly():
unforgeables.append(CGGeneric(template % array.variableName(False)))
if array.hasChromeOnly():
@@ -13021,7 +13021,6 @@ class CGUnionStruct(CGThing):
return self.type.getDeps()
def getStruct(self):
members = [
ClassMember("mType", "TypeOrUninit", body="eUninitialized"),
ClassMember("mValue", "Value"),
@@ -17065,7 +17064,6 @@ class CGDescriptor(CGThing):
class CGNamespacedEnum(CGThing):
def __init__(self, namespace, enumName, names, values, comment=""):
if not values:
values = []
@@ -23425,7 +23423,6 @@ class GlobalGenRoots:
@staticmethod
def PrototypeList(config):
# Prototype ID enum.
descriptorsWithPrototype = config.getDescriptors(
hasInterfacePrototypeObject=True
@@ -23613,7 +23610,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterBindings(config):
curr = CGNamespace.build(
["mozilla", "dom"], CGGlobalNames(config.windowGlobalNames)
)
@@ -23641,7 +23637,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterWorkerBindings(config):
curr = CGRegisterWorkerBindings(config)
# Wrap all of that in our namespaces.
@@ -23668,7 +23663,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterWorkerDebuggerBindings(config):
curr = CGRegisterWorkerDebuggerBindings(config)
# Wrap all of that in our namespaces.
@@ -23695,7 +23689,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterWorkletBindings(config):
curr = CGRegisterWorkletBindings(config)
# Wrap all of that in our namespaces.
@@ -23722,7 +23715,6 @@ class GlobalGenRoots:
@staticmethod
def RegisterShadowRealmBindings(config):
curr = CGRegisterShadowRealmBindings(config)
# Wrap all of that in our namespaces.

View file

@@ -248,7 +248,7 @@ class Configuration(DescriptorProvider):
for m in t.flatMemberTypes:
addUnions(m)
-for (t, _) in getAllTypes(self.descriptors, self.dictionaries, self.callbacks):
+for t, _ in getAllTypes(self.descriptors, self.dictionaries, self.callbacks):
addUnions(t)
for d in getDictionariesConvertedToJS(
@@ -440,7 +440,7 @@ class Configuration(DescriptorProvider):
name,
)
)
-for (k, v) in firstExtAttrs.items():
+for k, v in firstExtAttrs.items():
if extAttrs[k] != v:
raise TypeError(
"%s on %s and %s on %s have different values for extended attribute %s, but they're using the same template %s."

View file

@@ -947,7 +947,6 @@ class IDLInterfaceMixin(IDLInterfaceOrInterfaceMixinOrNamespace):
def validate(self):
for member in self.members:
if member.isAttr():
if member.inherit:
raise WebIDLError(
@@ -3165,7 +3164,7 @@ class IDLUnionType(IDLType):
return "MaybeShared" + type.name
return type.name
-for (i, type) in enumerate(self.memberTypes):
+for i, type in enumerate(self.memberTypes):
if not type.isComplete():
self.memberTypes[i] = type.complete(scope)
@@ -3206,7 +3205,7 @@ class IDLUnionType(IDLType):
continue
i += 1
-for (i, t) in enumerate(self.flatMemberTypes[:-1]):
+for i, t in enumerate(self.flatMemberTypes[:-1]):
for u in self.flatMemberTypes[i + 1 :]:
if not t.isDistinguishableFrom(u):
raise WebIDLError(
@@ -3619,7 +3618,6 @@ class IDLPromiseType(IDLParametrizedType):
class IDLBuiltinType(IDLType):
Types = enum(
# The integer types
"byte",
@@ -4287,7 +4285,7 @@ class IDLValue(IDLObject):
)
elif self.type.isInteger() and type.isFloat():
# Convert an integer literal into float
-if -(2 ** 24) <= self.value <= 2 ** 24:
+if -(2**24) <= self.value <= 2**24:
return IDLValue(self.location, type, float(self.value))
else:
raise WebIDLError(
@@ -4468,7 +4466,6 @@ class IDLUndefinedValue(IDLObject):
class IDLInterfaceMember(IDLObjectWithIdentifier, IDLExposureMixins):
Tags = enum(
"Const", "Attr", "Method", "MaplikeOrSetlike", "AsyncIterable", "Iterable"
)
@@ -5791,7 +5788,7 @@ class IDLAttribute(IDLInterfaceMember):
"CrossOriginWritable",
"SetterThrows",
]
-for (key, value) in self._extendedAttrDict.items():
+for key, value in self._extendedAttrDict.items():
if key in allowedExtAttrs:
if value is not True:
raise WebIDLError(
@@ -5968,7 +5965,7 @@ class IDLCallback(IDLObjectWithScope):
IDLObjectWithScope.__init__(self, location, parentScope, identifier)
-for (returnType, arguments) in self.signatures():
+for returnType, arguments in self.signatures():
for argument in arguments:
argument.resolve(self)
@@ -6109,7 +6106,6 @@ class IDLMethodOverload:
class IDLMethod(IDLInterfaceMember, IDLScope):
Special = enum(
"Getter", "Setter", "Deleter", "LegacyCaller", base=IDLInterfaceMember.Special
)
@@ -6292,7 +6288,7 @@ class IDLMethod(IDLInterfaceMember, IDLScope):
assert isinstance(parentScope, IDLScope)
IDLObjectWithIdentifier.resolve(self, parentScope)
IDLScope.__init__(self, self.location, parentScope, self.identifier)
-for (returnType, arguments) in self.signatures():
+for returnType, arguments in self.signatures():
for argument in arguments:
argument.resolve(self)
@@ -6435,7 +6431,7 @@ class IDLMethod(IDLInterfaceMember, IDLScope):
variadicArgument = None
arguments = overload.arguments
-for (idx, argument) in enumerate(arguments):
+for idx, argument in enumerate(arguments):
assert argument.type.isComplete()
if (
@@ -6568,8 +6564,8 @@ class IDLMethod(IDLInterfaceMember, IDLScope):
def distinguishingIndexForArgCount(self, argc):
def isValidDistinguishingIndex(idx, signatures):
-for (firstSigIndex, (firstRetval, firstArgs)) in enumerate(signatures[:-1]):
+for firstSigIndex, (firstRetval, firstArgs) in enumerate(signatures[:-1]):
-for (secondRetval, secondArgs) in signatures[firstSigIndex + 1 :]:
+for secondRetval, secondArgs in signatures[firstSigIndex + 1 :]:
if idx < len(firstArgs):
firstType = firstArgs[idx].type
else:

View file

@@ -337,7 +337,7 @@ def WebIDLTest(parser, harness):
),
]
-for (name, template) in TEMPLATES:
+for name, template in TEMPLATES:
parser = parser.reset()
threw = False
try:
@@ -346,7 +346,7 @@ def WebIDLTest(parser, harness):
except Exception:
threw = True
harness.ok(not threw, "Template for %s parses without attributes" % name)
-for (attribute, type) in ATTRIBUTES:
+for attribute, type in ATTRIBUTES:
parser = parser.reset()
threw = False
try:

View file

@@ -94,7 +94,7 @@ def WebIDLTest(parser, harness):
"""
)
results = parser.finish()
-for (i, iface) in enumerate(results):
+for i, iface in enumerate(results):
harness.check(
iface.isSingleOperationInterface(),
i < 4,

View file

@@ -64,7 +64,7 @@ def WebIDLTest(parser, harness):
len(iface.members), len(expected), "Expect %s members" % len(expected)
)
-for (const, (QName, name, type, value)) in zip(iface.members, expected):
+for const, (QName, name, type, value) in zip(iface.members, expected):
harness.ok(isinstance(const, WebIDL.IDLConst), "Should be an IDLConst")
harness.ok(const.isConst(), "Const is a const")
harness.ok(not const.isAttr(), "Const is not an attr")

View file

@@ -87,7 +87,7 @@ def WebIDLTest(parser, harness):
)
sigpairs = zip(method.signatures(), signatures)
-for (gotSignature, expectedSignature) in sigpairs:
+for gotSignature, expectedSignature in sigpairs:
(gotRetType, gotArgs) = gotSignature
(expectedRetType, expectedArgs) = expectedSignature

View file

@@ -55,7 +55,7 @@ def WebIDLTest(parser, harness):
method = iface.members[6]
harness.ok(isinstance(method, WebIDL.IDLMethod), "Should be an IDLMethod")
argtypes = [a.type for a in method.signatures()[0][1]]
-for (idx, type) in enumerate(argtypes):
+for idx, type in enumerate(argtypes):
harness.ok(type.isFloat(), "Type %d should be float" % idx)
harness.check(
type.isUnrestricted(),

View file

@@ -90,7 +90,7 @@ def WebIDLTest(parser, harness):
)
sigpairs = zip(method.signatures(), signatures)
-for (gotSignature, expectedSignature) in sigpairs:
+for gotSignature, expectedSignature in sigpairs:
(gotRetType, gotArgs) = gotSignature
(expectedRetType, expectedArgs) = expectedSignature

View file

@@ -4,7 +4,6 @@
def WebIDLTest(parser, harness):
# Test dictionary as inner type
harness.should_throw(
parser,

View file

@@ -1,5 +1,4 @@
def WebIDLTest(parser, harness):
parser.parse(
"""
interface Foo {};

View file

@@ -89,7 +89,7 @@ def WebIDLTest(parser, harness):
interface PrepareForTest {
"""
)
-for (i, type) in enumerate(types):
+for i, type in enumerate(types):
interface += string.Template(
"""
readonly attribute ${type} attr${i};
@@ -155,7 +155,7 @@ def WebIDLTest(parser, harness):
interface TestUnion {
"""
)
-for (i, type) in enumerate(validUnionTypes):
+for i, type in enumerate(validUnionTypes):
interface += string.Template(
"""
undefined method${i}(${type} arg);

View file

@@ -140,7 +140,6 @@ class CachesDeleteCleanupAtShutdownTestCase(MarionetteTestCase):
beforeUsage = self.getUsage()
def ensureCleanCallback():
Wait(self.marionette, timeout=60).until(
lambda x: (self.getUsage() - beforeUsage)
< EXPECTED_CACHEDIR_SIZE_AFTER_CLEANUP,

View file

@@ -510,7 +510,7 @@ def LoadErrata():
ret = {}
-for (sectionName, (sectionLineNum, sectionMap)) in iniMap.items():
+for sectionName, (sectionLineNum, sectionMap) in iniMap.items():
curLines = []
if sectionName is None:
@@ -521,7 +521,7 @@ def LoadErrata():
sectionLineNum, sectionName
)
-for (key, (lineNum, val)) in sectionMap.items():
+for key, (lineNum, val) in sectionMap.items():
assert key in ACCEPTABLE_ERRATA_KEYS, "Line {}: {}".format(lineNum, key)
curLine = "{} = {}".format(key, val)

View file

@@ -7,7 +7,6 @@
include("/ipc/chromium/chromium-config.mozbuild")
if CONFIG["OS_TARGET"] != "WINNT":
if CONFIG["OS_TARGET"] != "Android":
SOURCES += [
"ice_unittest.cpp",

View file

@@ -81,7 +81,7 @@ def web_socket_transfer_data(request):
msgutil.send_message(request, six.ensure_text(resp))
elif request.ws_protocol == "test-12":
msg = msgutil.receive_message(request)
-if msg == u"a\ufffdb":
+if msg == "a\ufffdb":
# converted unpaired surrogate in UTF-16 to UTF-8 OK
msgutil.send_message(request, "SUCCESS")
else:

View file

@@ -485,7 +485,7 @@ def export_target(target_full_name) -> Set[str]:
append_arr(lines, "LOCAL_INCLUDES", fixup_paths(desc["include_dirs"]))
append_arr_commented(lines, "CXXFLAGS", cxxflags)
-for (config, v) in sorted_items(sources_by_config):
+for config, v in sorted_items(sources_by_config):
indent = 0
if config:
lines.append("if {}:".format(config))
@@ -506,7 +506,7 @@ def export_target(target_full_name) -> Set[str]:
append_arr(lines, "OS_LIBS", os_libs)
append_arr_commented(lines, "LDFLAGS", ldflags)
-for (k, v) in sorted(extras.items()):
+for k, v in sorted(extras.items()):
lines.append("{} = {}".format(k, v))
lib_type = desc["type"]

View file

@@ -35,7 +35,7 @@ f.close
offsets = []
length = 10 + 11 * len(vsdict)
-for (k, mappings) in sorted(vsdict.items()):
+for k, mappings in sorted(vsdict.items()):
offsets.append(length)
length += 4 + 5 * len(mappings)
@@ -69,10 +69,10 @@ for i, k in enumerate(sorted(vsdict.keys())):
" U24(0x%04X), U32(0), U32(%d), // varSelectorRecord[%d]\n"
% (k, offsets[i], i)
)
-for (k, mappings) in sorted(vsdict.items()):
+for k, mappings in sorted(vsdict.items()):
f.write(" // 0x%04X\n" % k)
f.write(" U32(%d), // numUVSMappings\n" % len(mappings))
-for (unified, compat) in sorted(mappings.items()):
+for unified, compat in sorted(mappings.items()):
f.write(" U24(0x%04X), GLYPH(0x%04X),\n" % (unified, compat))
f.write(
"""};

View file

@@ -46,7 +46,6 @@ if CONFIG["MOZ_WIDGET_TOOLKIT"] == "android":
"android/AndroidHal.cpp",
]
elif CONFIG["OS_TARGET"] == "Linux":
UNIFIED_SOURCES += [
"fallback/FallbackScreenConfiguration.cpp",
"fallback/FallbackSensor.cpp",

View file

@@ -234,7 +234,7 @@ enum IPCMessages {
)
for protocol in sorted(allmessages.keys()):
-for (msg, num) in allmessages[protocol].idnums:
+for msg, num in allmessages[protocol].idnums:
if num:
print(" %s = %s," % (msg, num), file=ipc_msgtype_name)
elif not msg.endswith("End"):
@@ -256,7 +256,7 @@ const char* StringFromIPCMessageType(uint32_t aMessageType)
)
for protocol in sorted(allmessages.keys()):
-for (msg, num) in allmessages[protocol].idnums:
+for msg, num in allmessages[protocol].idnums:
if num or msg.endswith("End"):
continue
print(

View file

@@ -64,9 +64,9 @@ class LowerToCxx:
def hashfunc(value):
-h = hash_str(value) % 2 ** 32
+h = hash_str(value) % 2**32
if h < 0:
-h += 2 ** 32
+h += 2**32
return h
@@ -2275,7 +2275,7 @@ class _ParamTraits:
# After non-pod data, bulk read/write pod data in member order. This has
# to be done after the result has been constructed, so that we have
# somewhere to read into.
-for (size, fields) in itertools.groupby(
+for size, fields in itertools.groupby(
sd.fields_member_order(), lambda f: pod_size(f.ipdltype)
):
if size != pod_size_sentinel:
@@ -2517,7 +2517,7 @@ class _ComputeTypeDeps(TypeVisitor):
def _fieldStaticAssertions(sd):
staticasserts = []
-for (size, fields) in itertools.groupby(
+for size, fields in itertools.groupby(
sd.fields_member_order(), lambda f: pod_size(f.ipdltype)
):
if size == pod_size_sentinel:

View file

@@ -1721,7 +1721,6 @@ class CheckTypes(TcheckVisitor):
)
if mtype.compress and (not mtype.isAsync() or mtype.isCtor() or mtype.isDtor()):
if mtype.isCtor() or mtype.isDtor():
message_type = "constructor" if mtype.isCtor() else "destructor"
error_message = (

View file

@@ -20,11 +20,11 @@ from subprocess import check_call
topsrcdir, chromiumtree, rev = sys.argv[1:]
if not os.path.exists(os.path.join(topsrcdir, "client.py")):
-print >>sys.stderr, "Incorrect topsrcdir"
+print >> sys.stderr, "Incorrect topsrcdir"
sys.exit(1)
if not os.path.exists(os.path.join(chromiumtree, "src/DEPS")):
-print >>sys.stderr, "Incorrect chromium directory, missing DEPS"
+print >> sys.stderr, "Incorrect chromium directory, missing DEPS"
sys.exit(1)
check_call(["gclient", "sync", "--force", "--revision=src@%s" % rev], cwd=chromiumtree)

View file

@@ -26,6 +26,7 @@ building_ffi = depends(system_ffi)(lambda v: v is None)
set_config("MOZ_SYSTEM_FFI", depends_if(system_ffi)(lambda _: True))
# Target selection, based on ffi/configure.ac.
@depends(target, when=building_ffi)
def ffi_target(target):

View file

@@ -173,6 +173,7 @@ def enable_decorators(value):
set_config("ENABLE_DECORATORS", enable_decorators)
set_define("ENABLE_DECORATORS", enable_decorators)
# JIT support
# =======================================================
@depends(target, "--enable-record-tuple")
@@ -506,6 +507,7 @@ set_config("JS_MASM_VERBOSE", depends_if("--enable-masm-verbose")(lambda _: True
# Architecture feature flags
# =======================================================
# Apple silicon does not seem to have any way to query the OS for the JSCVT
# flag stored in the ID_AA64ISAR1_EL1 system register. In the mean time, we
# hard code the value of the JSCVT flag which guards the implementation of
@@ -521,6 +523,7 @@ option(
help="{Enable|Disable} static use of FJCVTZS instruction on Aarch64 targets.",
)
# The "ARM Architecture Reference Manual" for ARMv8 defines the JSCVT flag as
# being a 4 bit integer (D12.2.52) and it can be manipulated using >= operator
# (D12.1.4).
@@ -549,6 +552,7 @@ def has_pthread_jit_write_protect_np(target):
# JIT code write protection.
set_define("JS_USE_APPLE_FAST_WX", True, when=has_pthread_jit_write_protect_np)
# CTypes
# =======================================================
@depends(js_standalone)

View file

@@ -99,7 +99,7 @@ def writeMappingsVar(println, mapping, name, description, source, url):
println("")
writeMappingHeader(println, description, source, url)
println("var {0} = {{".format(name))
-for (key, value) in sorted(mapping.items(), key=itemgetter(0)):
+for key, value in sorted(mapping.items(), key=itemgetter(0)):
println(' "{0}": "{1}",'.format(key, value))
println("};")
@@ -173,7 +173,7 @@ def writeMappingsBinarySearchBody(
# for the binary search, which only performs a single |memcmp| for multiple
# of two subtag lengths.
mappings_keys = mappings.keys() if type(mappings) == dict else mappings
-for (length, subtags) in groupby(sorted(mappings_keys, key=len), len):
+for length, subtags in groupby(sorted(mappings_keys, key=len), len):
# Omit the length check if the current length is the maximum length.
if length != tag_maxlength:
println(
@@ -318,7 +318,7 @@ void mozilla::intl::Locale::PerformComplexLanguageMappings() {
# Merge duplicate language entries.
language_aliases = {}
-for (deprecated_language, (language, script, region)) in sorted(
+for deprecated_language, (language, script, region) in sorted(
complex_language_mappings.items(), key=itemgetter(0)
):
key = (language, script, region)
@@ -328,7 +328,7 @@ void mozilla::intl::Locale::PerformComplexLanguageMappings() {
language_aliases[key].append(deprecated_language)
first_language = True
-for (deprecated_language, (language, script, region)) in sorted(
+for deprecated_language, (language, script, region) in sorted(
complex_language_mappings.items(), key=itemgetter(0)
):
key = (language, script, region)
@@ -422,7 +422,7 @@ void mozilla::intl::Locale::PerformComplexRegionMappings() {
# Merge duplicate region entries.
region_aliases = {}
-for (deprecated_region, (default, non_default_replacements)) in sorted(
+for deprecated_region, (default, non_default_replacements) in sorted(
complex_region_mappings.items(), key=itemgetter(0)
):
key = hash_key(default, non_default_replacements)
@@ -432,7 +432,7 @@ void mozilla::intl::Locale::PerformComplexRegionMappings() {
region_aliases[key].append(deprecated_region)
first_region = True
-for (deprecated_region, (default, non_default_replacements)) in sorted(
+for deprecated_region, (default, non_default_replacements) in sorted(
complex_region_mappings.items(), key=itemgetter(0)
):
key = hash_key(default, non_default_replacements)
@@ -587,7 +587,7 @@ bool mozilla::intl::Locale::PerformVariantMappings() {
)
)
-for (deprecated_variant, (type, replacement)) in sorted(
+for deprecated_variant, (type, replacement) in sorted(
with_alias, key=itemgetter(0)
):
println(
@@ -730,7 +730,7 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
# Group the mappings by language.
legacy_mappings_by_language = {}
-for (type, replacement) in legacy_mappings.items():
+for type, replacement in legacy_mappings.items():
(language, _, _, _) = type
legacy_mappings_by_language.setdefault(language, {})[type] = replacement
@@ -820,7 +820,7 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
def hash_key(mappings):
return str(sorted(mappings.items(), key=itemgetter(0)))
-for (lang, mappings) in sorted(
+for lang, mappings in sorted(
legacy_mappings_by_language.items(), key=itemgetter(0)
):
key = hash_key(mappings)
@@ -848,10 +848,9 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
return len(k.split("-"))
# Alias rules are applied by largest union size first.
-for (size, mappings_by_size) in groupby(
+for size, mappings_by_size in groupby(
sorted(mappings.items(), key=variant_size, reverse=True), key=variant_size
):
# Convert grouper object to dict.
mappings_by_size = dict(mappings_by_size)
@@ -859,7 +858,7 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
chain_if = size == 1
# Alias rules are applied in alphabetical order
-for (variants, r_language) in sorted(
+for variants, r_language in sorted(
mappings_by_size.items(), key=itemgetter(0)
):
sorted_variants = sorted(variants.split("-"))
@@ -868,7 +867,7 @@ bool mozilla::intl::Locale::UpdateLegacyMappings() {
maybe_else = "else " if chain_if and not is_first else ""
is_first = False
-for (i, variant) in enumerate(sorted_variants):
+for i, variant in enumerate(sorted_variants):
println(
f"""
{" " * i}{maybe_else}if (auto* {variant} = findVariant("{variant}")) {{
@@ -1106,11 +1105,11 @@ def readSupplementalData(core_file):
# Compute the transitive closure.
# Any case which currently doesn't occur in the CLDR sources isn't supported
# and will lead to throwing an error.
-for (type, replacement) in rules.items():
+for type, replacement in rules.items():
(language, script, region, variants) = type
(r_language, r_script, r_region, r_variants) = replacement
-for (i_type, i_replacement) in rules.items():
+for i_type, i_replacement in rules.items():
(i_language, i_script, i_region, i_variants) = i_type
(i_r_language, i_r_script, i_r_region, i_r_variants) = i_replacement
@@ -1257,7 +1256,7 @@ def readSupplementalData(core_file):
variant_mappings = {}
# Preprocess all rules so we can perform a single lookup per subtag at runtime.
-for (type, replacement) in rules.items():
+for type, replacement in rules.items():
(language, script, region, variants) = type
(r_language, r_script, r_region, r_variants) = replacement
@@ -1399,7 +1398,7 @@ def readSupplementalData(core_file):
complex_region_mappings_final = {}
-for (deprecated_region, replacements) in complex_region_mappings.items():
+for deprecated_region, replacements in complex_region_mappings.items():
# Find all likely subtag entries which don't already contain a region
# subtag and whose target region is in the list of replacement regions.
region_likely_subtags = [
@@ -2507,7 +2506,7 @@ def readICULegacyZones(icuDir):
# A handful of non-IANA zones/links are not in icuzones and must be added
# manually so that we won't invoke ICU with them.
-for (zone, target) in otherICULegacyLinks().items():
+for zone, target in otherICULegacyLinks().items():
if zone in links:
if links[zone] != target:
raise KeyError(
@@ -2712,7 +2711,7 @@ def processTimeZones(
println("// Format:")
println('// "ZoneName" // ICU-Name [time zone file]')
println("const char* const ianaZonesTreatedAsLinksByICU[] = {")
-for (zone, icuZone) in incorrectZones:
+for zone, icuZone in incorrectZones:
println(' "%s", // %s [%s]' % (zone, icuZone, zone.filename))
println("};")
println("")
@@ -2726,7 +2725,7 @@ def processTimeZones(
println("};")
println("")
println("const LinkAndTarget ianaLinksCanonicalizedDifferentlyByICU[] = {")
-for (zone, target, icuTarget) in incorrectLinks:
+for zone, target, icuTarget in incorrectLinks:
println(
' { "%s", "%s" }, // %s [%s]'
% (zone, target, icuTarget, zone.filename)
@@ -2796,7 +2795,7 @@ const tzMapper = [
println(description)
println("const links = {")
-for (zone, target) in sorted(links, key=itemgetter(0)):
+for zone, target in sorted(links, key=itemgetter(0)):
println(' "%s": "%s",' % (zone, target))
println("};")
@@ -3150,10 +3149,10 @@ def writeCurrencyFile(published, currencies, out):
*/"""
)
println("var currencyDigits = {")
-for (currency, entries) in groupby(
+for currency, entries in groupby(
sorted(currencies, key=itemgetter(0)), itemgetter(0)
):
-for (_, minorUnits, currencyName, countryName) in entries:
+for _, minorUnits, currencyName, countryName in entries:
println(" // {} ({})".format(currencyName, countryName))
println(" {}: {},".format(currency, minorUnits))
println("};")
@@ -3319,7 +3318,7 @@ const char* mozilla::intl::Locale::Replace{0}ExtensionType(
# Merge duplicate keys.
key_aliases = {}
-for (key, replacements) in sorted(mapping.items(), key=itemgetter(0)):
+for key, replacements in sorted(mapping.items(), key=itemgetter(0)):
hash_key = to_hash_key(replacements)
if hash_key not in key_aliases:
key_aliases[hash_key] = []
@@ -3327,7 +3326,7 @@ const char* mozilla::intl::Locale::Replace{0}ExtensionType(
key_aliases[hash_key].append(key)
first_key = True
-for (key, replacements) in sorted(mapping.items(), key=itemgetter(0)):
+for key, replacements in sorted(mapping.items(), key=itemgetter(0)):
hash_key = to_hash_key(replacements)
if key in key_aliases[hash_key]:
continue
@@ -3368,7 +3367,7 @@ const char* mozilla::intl::Locale::Replace{0}ExtensionType(
)
)
else:
-for (type, replacement) in replacements:
+for type, replacement in replacements:
println(
"""
if (Is{}Type(type, "{}")) {{

View file

@@ -166,7 +166,7 @@ if __name__ == "__main__":
test_list = find_tests(test_dir)
if not test_list:
-print >>sys.stderr, "No tests found matching command line arguments."
+print >> sys.stderr, "No tests found matching command line arguments."
sys.exit(0)
test_list = [Test.from_file(tst, name, OPTIONS) for tst, name in test_list]
@@ -178,7 +178,7 @@ if __name__ == "__main__":
except OSError:
if not os.path.exists(JS):
-print >>sys.stderr, "JS shell argument: file does not exist: '%s'" % JS
+print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % JS
sys.exit(1)
else:
raise

View file

@@ -212,7 +212,7 @@ JOBS = {
# - item is command[j]
def out_indexes(command):
i = 0
-for (j, fragment) in enumerate(command):
+for j, fragment in enumerate(command):
if isinstance(fragment, Output):
yield (i, j, fragment)
i += 1
@@ -221,7 +221,7 @@ def out_indexes(command):
def job_command_with_final_output_names(job):
outfiles = job.get("outputs", [])
command = list(job["command"])
-for (i, j, name) in out_indexes(job["command"]):
+for i, j, name in out_indexes(job["command"]):
command[j] = outfiles[i]
return command
@@ -256,7 +256,7 @@ def run_job(name, config):
info["redirect"].close()
# Rename the temporary files to their final names.
-for (temp, final) in info["rename_map"].items():
+for temp, final in info["rename_map"].items():
try:
if config["verbose"] > 1:
print("Renaming %s -> %s" % (temp, final))
@@ -285,7 +285,7 @@ def spawn_command(cmdspec, job, name, config):
# from those temp names to their actual final names that will be used
# if the command succeeds.
command = list(cmdspec)
-for (i, j, raw_name) in out_indexes(cmdspec):
+for i, j, raw_name in out_indexes(cmdspec):
[name] = fill([raw_name], config)
command[j] = "{}.tmp{}".format(name, config.get("i", ""))
rename_map[command[j]] = outfiles[i]
@@ -305,7 +305,7 @@ def spawn_command(cmdspec, job, name, config):
# Default to conservatively assuming 4GB/job.
-def max_parallel_jobs(job_size=4 * 2 ** 30):
+def max_parallel_jobs(job_size=4 * 2**30):
"""Return the max number of parallel jobs we can run without overfilling
memory, assuming heavyweight jobs."""
from_cores = int(subprocess.check_output(["nproc", "--ignore=1"]).strip())
@@ -434,7 +434,7 @@ for step in steps:
elif "outputs" in job and "command" in job:
outfiles = job["outputs"]
num_outputs = 0
-for (i, j, name) in out_indexes(job["command"]):
+for i, j, name in out_indexes(job["command"]):
# Trim the {curly brackets} off of the output keys.
data[name[1:-1]] = outfiles[i]
num_outputs += 1

View file

@@ -35,7 +35,7 @@ class Body(dict):
src, dst = edge["Index"]
self["SrcPoint2Edges"][src].append(edge)
self["Line2Edges"] = defaultdict(list)
-for (src, edges) in self["SrcPoint2Edges"].items():
+for src, edges in self["SrcPoint2Edges"].items():
line = self["Points"][src]
self["Line2Edges"][line].extend(edges)

View file

@@ -21,5 +21,5 @@ def get_header_length_and_flags(value, cache):
# If we couldn't fetch the length directly, it must be stored
# within `flags`.
length = flags >> 32
-flags = flags % 2 ** 32
+flags = flags % 2**32
return length, flags

View file

@@ -120,7 +120,7 @@ class JSValueTypeCache(object):
# the i'th magic value.
d = gdb.types.make_enum_dict(gdb.lookup_type("JSWhyMagic"))
self.magic_names = list(range(max(d.values()) + 1))
-for (k, v) in d.items():
+for k, v in d.items():
self.magic_names[v] = k
# Choose an unboxing scheme for this architecture.

View file

@@ -137,7 +137,7 @@ def clear_module_printers(module_name):
# should remove. (It's not safe to delete entries from a dictionary
# while we're iterating over it.)
to_delete = []
-for (k, v) in d.items():
+for k, v in d.items():
if v.__module__ == module_name:
to_delete.append(k)
remove_from_subprinter_list(v)
@@ -250,7 +250,6 @@ class TypeCache(object):
def implemented_types(t):
# Yield all types that follow |t|.
def followers(t):
if t.code == gdb.TYPE_CODE_TYPEDEF:
@@ -346,7 +345,7 @@ def lookup_for_objfile(objfile):
# to scan the whole list, so regexp printers should be used
# sparingly.
s = str(value.type)
-for (r, f) in printers_by_regexp:
+for r, f in printers_by_regexp:
if f.enabled:
m = r.match(s)
if m:

View file

@@ -114,7 +114,6 @@ class Summary(object):
self.bar.finish()
if self.failures:
print("tests failed:")
for test in self.failures:
test.show(sys.stdout)

View file

@@ -92,7 +92,6 @@ def extend_condition(condition, value):
class JitTest:
VALGRIND_CMD = []
paths = (d for d in os.environ["PATH"].split(os.pathsep))
valgrinds = (os.path.join(d, "valgrind") for d in paths)
@@ -488,7 +487,7 @@ def check_output(out, err, rc, timed_out, test, options):
# Python 3 on Windows interprets process exit codes as unsigned
# integers, where Python 2 used to allow signed integers. Account for
# each possibility here.
-if sys.platform == "win32" and rc in (3 - 2 ** 31, 3 + 2 ** 31):
+if sys.platform == "win32" and rc in (3 - 2**31, 3 + 2**31):
return True
if sys.platform != "win32" and rc == -11:

View file

@@ -73,7 +73,7 @@ def init_device(options):
context = MozbuildObject.from_environment()
adb_path = get_adb_path(context)
-except (ImportError):
+except ImportError:
adb_path = "adb"
DEVICE = ADBDeviceFactory(

View file

@@ -80,7 +80,7 @@ class TestResult:
harness_message = "Exit code reported crash"
tests = []
else:
-for (idx, line) in enumerate(stdout):
+for idx, line in enumerate(stdout):
if line.startswith("WPT OUTPUT: "):
msg = line[len("WPT OUTPUT: ") :]
data = [output.test.wpt.url] + json.loads(msg)

View file

@@ -118,7 +118,7 @@ def read_input(tasks, timeout):
try:
readable, _, _ = select.select(rlist, [], exlist, timeout)
except OverflowError:
-print >>sys.stderr, "timeout value", timeout
+print >> sys.stderr, "timeout value", timeout
raise
for fd in readable:

View file

@@ -299,7 +299,7 @@ def insertMeta(source, frontmatter):
lines.append("/*---")
-for (key, value) in frontmatter.items():
+for key, value in frontmatter.items():
if key in ("description", "info"):
lines.append("%s: |" % key)
lines.append(
@@ -336,7 +336,6 @@ def findAndCopyIncludes(dirPath, baseDir, includeDir):
# we reach the base directory of shell.js include files.
# Each directory will have a shell.js file to copy.
while relPath:
# find the shell.js
shellFile = os.path.join(baseDir, relPath, "shell.js")
@@ -367,7 +366,6 @@ def findAndCopyIncludes(dirPath, baseDir, includeDir):
def exportTest262(args):
outDir = os.path.abspath(args.out)
providedSrcs = args.src
includeShell = args.exportshellincludes
@@ -384,15 +382,13 @@ def exportTest262(args):
# Go through each source path
for providedSrc in providedSrcs:
src = os.path.abspath(providedSrc)
# the basename of the path will be used in case multiple "src" arguments
# are passed in to create an output directory for each "src".
basename = os.path.basename(src)
# Process all test directories recursively.
-for (dirPath, _, fileNames) in os.walk(src):
+for dirPath, _, fileNames in os.walk(src):
# we need to make and get the unique set of includes for this filepath
includes = []
if includeShell:

View file

@@ -511,7 +511,7 @@ def process_test262(test262Dir, test262OutDir, strictTests, externManifests):
explicitIncludes[os.path.join("built-ins", "Temporal")] = ["temporalHelpers.js"]
# Process all test directories recursively.
-for (dirPath, dirNames, fileNames) in os.walk(testDir):
+for dirPath, dirNames, fileNames in os.walk(testDir):
relPath = os.path.relpath(dirPath, testDir)
if relPath == ".":
continue
@@ -554,7 +554,7 @@ def process_test262(test262Dir, test262OutDir, strictTests, externManifests):
test262parser, testSource, testName, includeSet, strictTests
)
-for (newFileName, newSource, externRefTest) in convert:
+for newFileName, newSource, externRefTest in convert:
writeTestFile(test262OutDir, newFileName, newSource)
if externRefTest is not None:

View file

@@ -202,7 +202,7 @@ def int_ranges(ints):
     """Yields consecutive ranges (inclusive) from integer values."""
     (a, b) = tee(sorted(ints))
     start = next(b)
-    for (curr, succ) in zip_longest(a, b):
+    for curr, succ in zip_longest(a, b):
         if curr + 1 != succ:
             yield (start, curr)
             start = succ
@@ -280,7 +280,7 @@ def process_derived_core_properties(derived_core_properties):
     id_start = set()
     id_continue = set()

-    for (char, prop) in read_derived_core_properties(derived_core_properties):
+    for char, prop in read_derived_core_properties(derived_core_properties):
         if prop == "ID_Start":
             id_start.add(char)
         if prop == "ID_Continue":
@@ -399,7 +399,7 @@ def process_case_folding(case_folding):
     folding_tests = []
     folding_codes = set()
-    for (code, mapping) in read_case_folding(case_folding):
+    for code, mapping in read_case_folding(case_folding):
         folding_map[code] = mapping
         if mapping not in rev_folding_map:
@@ -466,9 +466,7 @@ def process_special_casing(special_casing, table, index):
         (upper, lower, flags) = table[index[code]]
         return ((code + lower) & 0xFFFF, (code + upper) & 0xFFFF)

-    for (code, lower, upper, languages, contexts) in read_special_casing(
-        special_casing
-    ):
+    for code, lower, upper, languages, contexts in read_special_casing(special_casing):
         assert code <= MAX_BMP, "Unexpected character outside of BMP: %s" % code
         assert len(languages) <= 1, "Expected zero or one language ids: %s" % languages
         assert len(contexts) <= 1, (
@@ -686,7 +684,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println
     def describe_range(ranges, depth):
         indent = depth * " "
-        for (start, end) in ranges:
+        for start, end in ranges:
             if start == end:
                 println(indent, "// {}".format(codepoint_table.full_name(start)))
             else:
@@ -715,7 +713,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println
     def in_any_range(ranges, spaces):
         """Tests if the input character is included in any of the given ranges."""
         lines = [[]]
-        for (start, end) in ranges:
+        for start, end in ranges:
             expr = in_range(start, end, parenthesize=True)
             line = " || ".join(lines[-1] + [expr])
             if len(line) < (100 - len(spaces) - len(" ||")):
@@ -836,9 +834,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println
     println("{")
     println(" switch(ch) {")
-    for (code, converted) in sorted(
-        unconditional_toupper.items(), key=itemgetter(0)
-    ):
+    for code, converted in sorted(unconditional_toupper.items(), key=itemgetter(0)):
         println(
             " case {}: return {}; // {}".format(
                 hexlit(code), len(converted), codepoint_table.name(code)
@@ -860,9 +856,7 @@ def write_special_casing_methods(unconditional_toupper, codepoint_table, println
     println("{")
     println(" switch(ch) {")
-    for (code, converted) in sorted(
-        unconditional_toupper.items(), key=itemgetter(0)
-    ):
+    for code, converted in sorted(unconditional_toupper.items(), key=itemgetter(0)):
         println(
             " case {}: // {}".format(hexlit(code), codepoint_table.name(code))
         )
@@ -1306,7 +1300,7 @@ def make_unicode_file(
     println("bool")
     println("js::unicode::{}(char32_t codePoint)".format(name))
     println("{")
-    for (from_code, to_code) in int_ranges(group_set.keys()):
+    for from_code, to_code in int_ranges(group_set.keys()):
         println(
             " if (codePoint >= 0x{:X} && codePoint <= 0x{:X}) {{ // {} .. {}".format(
                 from_code,
@@ -1381,7 +1375,7 @@ def make_unicode_file(
 def getsize(data):
     """return smallest possible integer size for the given array"""
     maxdata = max(data)
-    assert maxdata < 2 ** 32
+    assert maxdata < 2**32
     if maxdata < 256:
         return 1
@@ -1421,7 +1415,7 @@ def splitbins(t):
     for shift in range(maxshift + 1):
         t1 = []
         t2 = []
-        size = 2 ** shift
+        size = 2**shift
         bincache = {}
         for i in range(0, len(t), size):
@@ -1445,7 +1439,7 @@ def splitbins(t):
             dump(t1, t2, shift, bytes)

     # exhaustively verify that the decomposition is correct
-    mask = 2 ** shift - 1
+    mask = 2**shift - 1
     for i in range(len(t)):
         assert t[i] == t2[(t1[i >> shift] << shift) + (i & mask)]
     return best

@@ -139,7 +139,7 @@ class OpcodeInfo:
 def find_by_name(list, name):
-    for (n, body) in list:
+    for n, body in list:
         if n == name:
             return body

@@ -165,13 +165,13 @@ def print_doc(index):
         )
     )

-    for (category_name, types) in index:
+    for category_name, types in index:
         print(
             '<h3 id="{id}">{name}</h3>'.format(
                 name=category_name, id=make_element_id(category_name)
             )
         )
-        for (type_name, opcodes) in types:
+        for type_name, opcodes in types:
             if type_name:
                 print(
                     '<h4 id="{id}">{name}</h4>'.format(

@@ -197,6 +197,7 @@ class Tile:
 # 3 4 5
 # 6 7 8

+
 # Compute the source tiles' slice and border-width sizes
 def make_src_tiles():
     tiles = [Tile() for i in range(9)]

@@ -124,7 +124,6 @@ def makeLookup1():
     # build the outline, hmtx and cmap data
     cp = baseCodepoint
     for index, tag in enumerate(features):
-
         # tag.pass
         glyphName = "%s.pass" % tag
         glyphOrder.append(glyphName)
@@ -303,7 +302,6 @@ def makeLookup3():
     # build the outline, hmtx and cmap data
     cp = baseCodepoint
     for index, tag in enumerate(features):
-
         # tag.pass
         glyphName = "%s.pass" % tag
         glyphOrder.append(glyphName)

@@ -20,7 +20,7 @@ import fontforge
 # generate a set of fonts, each with our special glyph at one codepoint,
 # and nothing else
 for codepoint in range(ord("A"), ord("D") + 1):
-    for (mark, width) in [("", 1500), ("2", 1800)]:
+    for mark, width in [("", 1500), ("2", 1800)]:
         charname = chr(codepoint)
         f = fontforge.font()
         n = "Mark" + mark + charname
@@ -41,8 +41,8 @@ for codepoint in range(ord("A"), ord("D") + 1):
 for codepoint in range(ord("A"), ord("A") + 1):
-    for (mark, width) in [("", 1500), ("2", 1800)]:
-        for (uposname, upos) in [("low", -350), ("high", -50)]:
+    for mark, width in [("", 1500), ("2", 1800)]:
+        for uposname, upos in [("low", -350), ("high", -50)]:
             charname = chr(codepoint)
             f = fontforge.font()
             n = "Mark" + mark + charname

@@ -62,7 +62,7 @@ def generate(output, dataFile):
         "const int32_t nsCSSProps::"
         "kIDLNameSortPositionTable[eCSSProperty_COUNT] = {\n"
     )
-    for (p, position) in ps:
+    for p, position in ps:
         output.write(" {},\n".format(position))
     output.write("};\n\n")

@@ -134,7 +134,7 @@ class ReftestRunner(MozbuildObject):
         hyphenation_path = os.path.join(self.topsrcdir, "intl", "locales")

-        for (dirpath, dirnames, filenames) in os.walk(hyphenation_path):
+        for dirpath, dirnames, filenames in os.walk(hyphenation_path):
             for filename in filenames:
                 if filename.endswith(".dic"):
                     args.extraProfileFiles.append(os.path.join(dirpath, filename))

@@ -106,7 +106,6 @@ if sys.version_info[0] == 3:
             raise value_.with_traceback(tb_)
         raise value_

 else:
-
     exec("def reraise_(tp_, value_, tb_=None):\n raise tp_, value_, tb_\n")
@@ -652,13 +651,13 @@ class RefTest(object):
         ]
         stepResults = {}
-        for (descr, step) in steps:
+        for descr, step in steps:
             stepResults[descr] = "not run / incomplete"

         startTime = datetime.now()
         maxTime = timedelta(seconds=options.verify_max_time)
         finalResult = "PASSED"
-        for (descr, step) in steps:
+        for descr, step in steps:
             if (datetime.now() - startTime) > maxTime:
                 self.log.info("::: Test verification is taking too long: Giving up!")
                 self.log.info(
@@ -730,7 +729,7 @@ class RefTest(object):
         # First job is only needs-focus tests. Remaining jobs are
         # non-needs-focus and chunked.
         perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus")
-        for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1):
+        for chunkNumber, jobArgs in enumerate(perProcessArgs[1:], start=1):
             jobArgs[-1:-1] = [
                 "--focus-filter-mode=non-needs-focus",
                 "--total-chunks=%d" % jobsWithoutFocus,
@@ -770,16 +769,16 @@ class RefTest(object):
         # Output the summaries that the ReftestThread filters suppressed.
         summaryObjects = [defaultdict(int) for s in summaryLines]
         for t in threads:
-            for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
+            for summaryObj, (text, categories) in zip(summaryObjects, summaryLines):
                 threadMatches = t.summaryMatches[text]
-                for (attribute, description) in categories:
+                for attribute, description in categories:
                     amount = int(threadMatches.group(attribute) if threadMatches else 0)
                     summaryObj[attribute] += amount
                 amount = int(threadMatches.group("total") if threadMatches else 0)
                 summaryObj["total"] += amount

         print("REFTEST INFO | Result summary:")
-        for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
+        for summaryObj, (text, categories) in zip(summaryObjects, summaryLines):
             details = ", ".join(
                 [
                     "%d %s" % (summaryObj[attribute], description)
@@ -863,7 +862,6 @@ class RefTest(object):
         valgrindSuppFiles=None,
         **profileArgs
     ):
-
         if cmdargs is None:
             cmdargs = []
         cmdargs = cmdargs[:]

@@ -9,7 +9,6 @@ LOCAL_INCLUDES += [
 ]

 if CONFIG["CPU_ARCH"] == "ppc64" and CONFIG["OS_TARGET"] == "Linux":
-
     DEFINES["TOOLCHAIN_MISS_ASM_HWCAP_H"] = True

 SOURCES += [

@@ -859,6 +859,7 @@ def prettyPrintDmdJson(out, j):
 # Code for clamping addresses using conservative pointer analysis.
 ##################################################################

+
 # Start is the address of the first byte of the block, while end is
 # the address of the first byte after the final byte in the block.
 class AddrRange:

@@ -555,6 +555,7 @@ set_config(
 @imports(_from="itertools", _import="chain")
 def gradle_android_dependencies_tasks(*tasks):
     """Gradle tasks run by |mach android dependencies|."""
+
     # The union, plus a bit more, of all of the Gradle tasks
     # invoked by the android-* automation jobs.
     def withoutGeckoBinaries(task):

@@ -329,7 +329,6 @@ def android_geckoview_docs(
     javadoc_path,
     upload_message,
 ):
-
     tasks = (
         command_context.substs["GRADLE_ANDROID_GECKOVIEW_DOCS_ARCHIVE_TASKS"]
         if archive or upload

@@ -479,7 +479,7 @@ class TestGenerateStaticPrefList(unittest.TestCase):
     def test_bad(self):
         "Test various pieces of bad input."
-        for (input_string, expected) in bad_inputs:
+        for input_string, expected in bad_inputs:
             inp = StringIO(input_string)
             try:
                 pref_list = yaml.safe_load(inp)

@@ -956,7 +956,6 @@ def save_cache(build_environment, configure_cache):
 @imports("glob")
 @imports(_from="os.path", _import="exists")
 def config_status_deps(build_env, build_project):
-
     topsrcdir = build_env.topsrcdir
     topobjdir = build_env.topobjdir

@@ -58,7 +58,6 @@ if CONFIG["OS_TARGET"] == "WINNT":
     ]

 if CONFIG["MOZ_WIDGET_TOOLKIT"]:
     if CONFIG["MOZ_MEMORY"] and FORCE_SHARED_LIB:
-
         pass
         # TODO: SHARED_LIBRARY_LIBS go here

@@ -21,7 +21,7 @@ class enumset_printer(object):
         return (
             ("flag", gdb.Value(i).cast(self.enum_type))
             for i in range(0, max_bit)
-            if ((bitfield & (2 ** i)) != 0)
+            if ((bitfield & (2**i)) != 0)
         )

     def to_string(self):

@@ -21,23 +21,23 @@ def process_config(toml_content):
     if not new_base:
         new_base = b"."  # relpath to '.' is '', sadly
     base_line = b'\nbasepath = "%s"' % new_base
-    content1 = re.sub(br"^\s*basepath\s*=\s*.+", base_line, toml_content, flags=re.M)
+    content1 = re.sub(rb"^\s*basepath\s*=\s*.+", base_line, toml_content, flags=re.M)

     # process [[paths]]
     start = 0
     content2 = b""
     for m in re.finditer(
-        br"\[\[\s*paths\s*\]\].+?(?=\[|\Z)", content1, re.M | re.DOTALL
+        rb"\[\[\s*paths\s*\]\].+?(?=\[|\Z)", content1, re.M | re.DOTALL
     ):
         content2 += content1[start : m.start()]
         path_content = m.group()
-        l10n_line = re.search(br"^\s*l10n\s*=.*$", path_content, flags=re.M).group()
+        l10n_line = re.search(rb"^\s*l10n\s*=.*$", path_content, flags=re.M).group()
         # remove variable expansions
-        new_reference = re.sub(br"{\s*\S+\s*}", b"", l10n_line)
+        new_reference = re.sub(rb"{\s*\S+\s*}", b"", l10n_line)
         # make the l10n a reference line
-        new_reference = re.sub(br"^(\s*)l10n(\s*=)", br"\1reference\2", new_reference)
+        new_reference = re.sub(rb"^(\s*)l10n(\s*=)", rb"\1reference\2", new_reference)
         content2 += re.sub(
-            br"^\s*reference\s*=.*$", new_reference, path_content, flags=re.M
+            rb"^\s*reference\s*=.*$", new_reference, path_content, flags=re.M
         )
         start = m.end()
     content2 += content1[start:]
@@ -45,11 +45,11 @@ def process_config(toml_content):
     start = 0
     content3 = b""
     for m in re.finditer(
-        br"\[\[\s*includes\s*\]\].+?(?=\[|\Z)", content2, re.M | re.DOTALL
+        rb"\[\[\s*includes\s*\]\].+?(?=\[|\Z)", content2, re.M | re.DOTALL
     ):
         content3 += content2[start : m.start()]
         include_content = m.group()
-        m_ = re.search(br'^\s*path = "(.+?)"', include_content, flags=re.M)
+        m_ = re.search(rb'^\s*path = "(.+?)"', include_content, flags=re.M)
         content3 += (
             include_content[: m_.start(1)]
             + generate_filename(m_.group(1))

@@ -452,7 +452,6 @@ def completion_fish(command_context, outfile):
             cmds_opts.append(comp)

     for sub in cmd.subcommands:
         for opt_strs, description in sub.options.items():
-
             comp = (
                 "complete -c mach -A -n '__fish_mach_complete_subcommand {} {}' "

@@ -137,7 +137,7 @@ def _patch_absolute_paths(sentry_event, topsrcdir: Path):
         else:
             return value

-    for (target_path, replacement) in (
+    for target_path, replacement in (
         (get_state_dir(), "<statedir>"),
         (str(topsrcdir), "<topsrcdir>"),
         (str(Path.home()), "~"),

@@ -23,7 +23,6 @@ COMPLETE = (
 def process_manifest(destdir, paths, track, no_symlinks=False, defines={}):
-
     if os.path.exists(track):
         # We use the same format as install manifests for the tracking
         # data.

@@ -648,7 +648,6 @@ class FileRecordJSONDecoder(json.JSONDecoder):
 class Manifest(object):
-
     valid_formats = ("json",)

     def __init__(self, file_records=None):

@@ -62,7 +62,6 @@ def verifyIniFile(initests, directory):
         found = False
         for f in files:
-
             fname = f.split("/")[-1]
             if fname.endswith(".in"):
                 fname = ".in".join(fname.split(".in")[:-1])

@@ -104,13 +104,13 @@ def android_version_code_v1(buildid, cpu_arch=None, min_sdk=0, max_sdk=0):
             "android:versionCode from build ID %s: hours underflow "
             "bits allotted!" % buildid
         )
-    if base > 2 ** 17:
+    if base > 2**17:
         raise ValueError(
             "Something has gone horribly wrong: cannot calculate "
             "android:versionCode from build ID %s: hours overflow "
             "bits allotted!" % buildid
         )
-    if base > 2 ** 17 - 366 * 24:
+    if base > 2**17 - 366 * 24:
         raise ValueError(
             "Running out of low order bits calculating "
             "android:versionCode from build ID %s: "

@@ -513,7 +513,6 @@ def artifact_toolchain(
                 requests.exceptions.ChunkedEncodingError,
                 requests.exceptions.ConnectionError,
             ) as e:
-
                 if isinstance(e, requests.exceptions.HTTPError):
                     # The relengapi proxy likes to return error 400 bad request
                     # which seems improbably to be due to our (simple) GET

@@ -363,7 +363,6 @@ class CommonBackend(BuildBackend):
         )

     def _handle_webidl_collection(self, webidls):
-
         bindings_dir = mozpath.join(self.environment.topobjdir, "dom", "bindings")
         all_inputs = set(webidls.all_static_sources())

@@ -121,7 +121,6 @@ class ConfigEnvironment(object):
         source=None,
         mozconfig=None,
     ):
-
         if not source:
             source = mozpath.join(topobjdir, "config.status")
         self.source = source

@@ -343,8 +343,8 @@ class CppEclipseBackend(CommonBackend):
         for i in args["includes"]:
             dirsettings += add_abs_include_path(i)
         for d in args["defines"]:
-            assert d[:2] == u"-D" or d[:2] == u"-U"
-            if d[:2] == u"-U":
+            assert d[:2] == "-D" or d[:2] == "-U"
+            if d[:2] == "-U":
                 # gfx/harfbuzz/src uses -UDEBUG, at least on Mac
                 # netwerk/sctp/src uses -U__APPLE__ on Mac
                 # XXX We should make this code smart enough to remove existing defines.

@@ -225,7 +225,7 @@ class FasterMakeBackend(MakeBackend, PartialBackend):
             mk.create_rule([target]).add_dependencies(
                 "%s" % d[0] for d in sorted(deps, key=itemgetter(0))
             )
-            for (merge, ref_file, l10n_file) in deps:
+            for merge, ref_file, l10n_file in deps:
                 rule = mk.create_rule([merge]).add_dependencies(
                     [ref_file, l10n_file] + python_deps
                 )

@@ -873,7 +873,6 @@ class RecursiveMakeBackend(MakeBackend):
         unified_files_makefile_variable="unified_files",
         include_curdir_build_rules=True,
     ):
-
         # In case it's a generator.
         unified_source_mapping = sorted(unified_source_mapping)

@@ -441,7 +441,6 @@ class VisualStudioBackend(CommonBackend):
     def _create_natvis_type(
         self, doc, visualizer, name, displayString, stringView=None
     ):
-
         t = visualizer.appendChild(doc.createElement("Type"))
         t.setAttribute("Name", name)
@@ -593,7 +592,6 @@ class VisualStudioBackend(CommonBackend):
         headers=[],
         sources=[],
     ):
-
         impl = getDOMImplementation()
         doc = impl.createDocument(MSBUILD_NAMESPACE, "Project", None)

@@ -751,7 +751,7 @@ class MozbuildObject(ProcessExecutionMixin):
         if not psutil or not job_size:
             num_jobs = cpus
         else:
-            mem_gb = psutil.virtual_memory().total / 1024 ** 3
+            mem_gb = psutil.virtual_memory().total / 1024**3
             from_mem = round(mem_gb / job_size)
             num_jobs = max(1, min(cpus, from_mem))
         print(

@@ -99,7 +99,6 @@ class StaticAnalysisMonitor(object):
         self._warnings_database = WarningsDatabase()

         def on_warning(warning):
-
             # Output paths relative to repository root if the paths are under repo tree
             warning["filename"] = build_repo_relative_path(
                 warning["filename"], self._srcdir
@@ -542,7 +541,6 @@ def _get_clang_tidy_command(
     jobs,
     fix,
 ):
-
     if checks == "-*":
         checks = ",".join(get_clang_tidy_config(command_context).checks)
@@ -786,7 +784,6 @@ def autotest(
             error_code = ret_val

     if error_code != TOOLS_SUCCESS:
-
         command_context.log(
             logging.INFO,
             "static-analysis",
@@ -1560,7 +1557,6 @@ def get_clang_tools(
     download_if_needed=True,
     verbose=False,
 ):
-
     rc, clang_paths = _set_clang_tools_paths(command_context)
     if rc != 0:
@@ -1803,7 +1799,6 @@ def _copy_clang_format_for_show_diff(
 def _run_clang_format_path(
     command_context, clang_format, paths, output_file, output_format
 ):
-
     # Run clang-format on files or directories directly
     from subprocess import CalledProcessError, check_output

@@ -42,7 +42,6 @@ class LcovRecord(object):
         self.lines = {}

     def __iadd__(self, other):
-
         # These shouldn't differ.
         self.source_file = other.source_file
         if hasattr(other, "test_name"):

@@ -944,7 +944,6 @@ class ConfigureSandbox(dict):
             @imports(_from='mozpack', _import='path', _as='mozpath')
         """
         for value, required in ((_import, True), (_from, False), (_as, False)):
-
             if not isinstance(value, six.string_types) and (
                 required or value is not None
             ):

@@ -919,8 +919,8 @@ class CCacheStats(object):
     ABSOLUTE_KEYS = {"cache_files", "cache_size", "cache_max_size"}
     FORMAT_KEYS = {"cache_size", "cache_max_size"}

-    GiB = 1024 ** 3
-    MiB = 1024 ** 2
+    GiB = 1024**3
+    MiB = 1024**2
     KiB = 1024

     def __init__(self, output=None, has_machine_format=False):

@@ -80,7 +80,6 @@ class Clobberer(object):
         # Object directory clobber older than current is fine.
         if os.path.getmtime(self.src_clobber) <= os.path.getmtime(self.obj_clobber):
-
             return False

         return True
@@ -232,7 +231,7 @@ class Clobberer(object):
                 self.remove_objdir(False)
                 print("Successfully completed auto clobber.", file=fh)
                 return True, True, None
-            except (IOError) as error:
+            except IOError as error:
                 return (
                     True,
                     False,

@@ -231,7 +231,7 @@ def memory(**kwargs) -> DoctorCheck:
     """Check the host machine has the recommended memory to develop Firefox."""
     memory = psutil.virtual_memory().total

     # Convert to gigabytes.
-    memory_GB = memory / 1024 ** 3.0
+    memory_GB = memory / 1024**3.0
     if memory_GB < MEMORY_THRESHOLD:
         status = CheckStatus.WARNING
         desc = "%.1fGB of physical memory, <%.1fGB" % (memory_GB, MEMORY_THRESHOLD)
@@ -266,8 +266,8 @@ def storage_freespace(topsrcdir: str, topobjdir: str, **kwargs) -> List[DoctorCh
     try:
         usage = psutil.disk_usage(mount)
         freespace, size = usage.free, usage.total
-        freespace_GB = freespace / 1024 ** 3
-        size_GB = size / 1024 ** 3
+        freespace_GB = freespace / 1024**3
+        size_GB = size / 1024**3
         if freespace_GB < FREESPACE_THRESHOLD:
             status = CheckStatus.WARNING
             desc.append(

@@ -490,7 +490,6 @@ class LinkFlags(BaseCompileFlags):
                 not self._context.config.substs.get("MOZ_DEBUG"),
             ]
         ):
-
             if self._context.config.substs.get("MOZ_OPTIMIZE"):
                 flags.append("-OPT:REF,ICF")

@@ -172,7 +172,6 @@ class TreeMetadataEmitter(LoggingMixin):
             yield o

     def _emit_libs_derived(self, contexts):
-
         # First aggregate idl sources.
         webidl_attrs = [
             ("GENERATED_EVENTS_WEBIDL_FILES", lambda c: c.generated_events_sources),
@@ -1461,15 +1460,11 @@ class TreeMetadataEmitter(LoggingMixin):
                 if mozpath.split(base)[0] == "res":
                     has_resources = True
                 for f in files:
-                    if (
-                        var
-                        in (
-                            "FINAL_TARGET_PP_FILES",
-                            "OBJDIR_PP_FILES",
-                            "LOCALIZED_PP_FILES",
-                        )
-                        and not isinstance(f, SourcePath)
-                    ):
+                    if var in (
+                        "FINAL_TARGET_PP_FILES",
+                        "OBJDIR_PP_FILES",
+                        "LOCALIZED_PP_FILES",
+                    ) and not isinstance(f, SourcePath):
                         raise SandboxValidationError(
                             ("Only source directory paths allowed in " + "%s: %s")
                             % (var, f),
@@ -1679,7 +1674,7 @@ class TreeMetadataEmitter(LoggingMixin):
         if not (generated_files or localized_generated_files):
             return

-        for (localized, gen) in (
+        for localized, gen in (
             (False, generated_files),
             (True, localized_generated_files),
         ):

@@ -531,7 +531,6 @@ class BuildReaderError(Exception):
         other_error=None,
         sandbox_called_error=None,
     ):
-
         self.file_stack = file_stack
         self.trace = trace
         self.sandbox_called_error = sandbox_called_error

@@ -297,7 +297,6 @@ def process_gn_config(
     # Process all targets from the given gn project and its dependencies.
     for target_fullname, spec in six.iteritems(targets):
-
         target_path, target_name = target_info(target_fullname)
         context_attrs = {}
@@ -396,7 +395,7 @@ def process_gn_config(
         ".mm": ("CMMFLAGS", ["cflags", "cflags_objcc"]),
     }
     variables = (suffix_map[e] for e in extensions if e in suffix_map)
-    for (var, flag_keys) in variables:
+    for var, flag_keys in variables:
         flags = [
             _f for _k in flag_keys for _f in spec.get(_k, []) if _f in mozilla_flags
         ]
@@ -522,7 +521,6 @@ def write_mozbuild(
     mozilla_flags,
     write_mozbuild_variables,
 ):
-
     all_mozbuild_results = []

     for gn_config in gn_configs:
@@ -626,7 +624,6 @@ def write_mozbuild(
         ("OS_TARGET", "CPU_ARCH"),
         ("OS_TARGET", "CPU_ARCH", "MOZ_X11"),
     ):
-
         conditions = set()
         for args in dirs_by_config.keys():
             cond = tuple(((k, dict(args).get(k) or "") for k in attrs))

@@ -98,7 +98,6 @@ class DeprecatedJarManifest(Exception):
 class JarManifestParser(object):
-
     ignore = re.compile("\s*(\#.*)?$")
     jarline = re.compile(
         """
@@ -210,7 +209,6 @@ class JarMaker(object):
     def __init__(
         self, outputFormat="flat", useJarfileManifest=True, useChromeManifest=False
     ):
-
         self.outputFormat = outputFormat
         self.useJarfileManifest = useJarfileManifest
         self.useChromeManifest = useChromeManifest

@@ -936,7 +936,6 @@ def gtest(
     debugger,
     debugger_args,
 ):
-
     # We lazy build gtest because it's slow to link
     try:
         command_context.config_environment

@@ -286,7 +286,6 @@ class MozconfigLoader(object):
         in_variable = None
         for line in output.splitlines():
-
             if not line:
                 continue

@@ -13,7 +13,6 @@ from mozbuild.repackaging.application_ini import get_application_ini_value
 def repackage_dmg(infile, output):
-
     if not tarfile.is_tarfile(infile):
         raise Exception("Input file %s is not a valid tarfile." % infile)

@@ -20,7 +20,6 @@ _MSI_ARCH = {
 def update_wsx(wfile, pvalues):
-
     parsed = minidom.parse(wfile)

     # construct a dictinary for the pre-processing options

@@ -13,7 +13,6 @@ from mozbuild.bootstrap import bootstrap_toolchain
 def repackage_pkg(infile, output):
-
     if not tarfile.is_tarfile(infile):
         raise Exception("Input file %s is not a valid tarfile." % infile)

@@ -154,7 +154,7 @@ langpack-contributors = { "" }
         self.assertEqual(len(description), 132)

     def test_get_version_maybe_buildid(self):
-        for (app_version, buildid, expected_version) in [
+        for app_version, buildid, expected_version in [
             ("109", "", "109"),
             ("109.0", "", "109.0"),
             ("109.0.0", "", "109.0.0"),

@@ -1292,7 +1292,7 @@ class TestRecursiveMakeBackend(BackendTester):
             ("not-installed", "not-installed.prog"),
         ]
         prefix = "PROGRAM = "
-        for (subdir, expected_program) in expected:
+        for subdir, expected_program in expected:
             with io.open(os.path.join(env.topobjdir, subdir, "backend.mk"), "r") as fh:
                 lines = fh.readlines()
                 program = [

@@ -4,6 +4,7 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.

+
 # For more complex and repetitive things, we can create templates
 @template
 def check_compiler_flag(flag):
@@ -18,6 +19,7 @@ def check_compiler_flag(flag):
 check_compiler_flag("-Werror=foobar")

+
 # Normal functions can be used in @depends functions.
 def fortytwo():
     return 42
@@ -45,6 +47,7 @@ def check(value):
 set_config("TEMPLATE_VALUE_2", check)

+
 # Normal functions can use @imports too to import modules.
 @imports("sys")
 def platform():

Some files were not shown because too many files have changed in this diff.
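The hunks above repeat a small set of black 23.x style changes: parentheses are dropped from tuple targets in for statements, simple power expressions are hugged (2 ** n becomes 2**n), byte-regex prefixes are normalized from br"..." to rb"...", u"..." prefixes and redundant parentheses in except clauses are removed, and blank lines directly after a block opener disappear. As an illustrative sketch on invented code (not part of this commit), the main transformations look like:

    # Before black 23.x (hypothetical input):
    for (key, value) in config.items():
        size = 2 ** shift
        pattern = br"^\s*name\s*="

    # After black 23.x:
    for key, value in config.items():
        size = 2**shift
        pattern = rb"^\s*name\s*="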