Bug 1472992 - [jsshell] Run javascript shell benchmarks against Google V8, r=jmaher

This runs the jsshell benchmarks against Google's V8 engine in addition to spidermonkey.
Both shells will run in the same task to keep things simple and decluttered,
though we could split them into separate tasks at a later date if needed.

Differential Revision: https://phabricator.services.mozilla.com/D3356

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Andrew Halberstadt 2018-08-15 13:52:47 +00:00
parent 6a85eae780
commit 5dbfd833bb
5 changed files with 103 additions and 27 deletions

View file

@ -3,7 +3,8 @@ project-repo-param-prefix: ''
treeherder:
group-names:
'cram': 'Cram tests'
'js-bench': 'JavaScript shell benchmarks'
'js-bench-sm': 'JavaScript shell benchmarks with Spidermonkey'
'js-bench-v8': 'JavaScript shell benchmarks with Google V8'
'mocha': 'Mocha unit tests'
'py2': 'Python 2 unit tests'
'py3': 'Python 3 unit tests'

View file

@ -1,8 +1,15 @@
d8:
description: V8 debug shell
fetch:
type: static-url
url: https://github.com/mozilla/perf-automation/releases/download/d8-6.7.17/d8-6.7.17.zip
sha256: 0aa1c4e630de78373185fc1c0fa34bc87826f63fd4cbb664668891d6f6a6b24e
size: 20578358
unity-webgl:
description: unity-webgl benchmark
fetch:
type: static-url
artifact-name: unity-webgl.zip
url: https://github.com/mozilla/perf-automation/releases/download/unity-webgl-v1/unity-webgl-6beb3d3e22ab.zip
sha256: f71ee3a3f5b9513f041e1dd01c032d51f2071e1ad130e8ac2cf0c553c468b9ea
size: 27062962
@ -11,7 +18,6 @@ assorted-dom:
description: assorted-dom benchmark
fetch:
type: static-url
artifact-name: assorted-dom.zip
url: https://github.com/mozilla/perf-automation/releases/download/assorted-dom-v1/assorted-dom-4befd28725c6.zip
sha256: e4eafe4a8e70c7ae6d42d668d3b1640b9fd9b696c486ff35aab754c368f78c2c
size: 402665
@ -20,7 +26,6 @@ web-tooling-benchmark:
description: Web Tooling Benchmark
fetch:
type: static-url
artifact-name: web-tooling-benchmark.zip
url: https://github.com/mozilla/perf-automation/releases/download/V1/web-tooling-benchmark-b2ac25c897c9.zip
sha256: 93b0b51df0cec3ca9bfa0bdf81d782306dcf18532e39b3ff3180409125daaff1
size: 5444135

View file

@ -8,47 +8,57 @@ job-defaults:
by-platform:
linux64.*:
env:
SHELL: /bin/bash
JSSHELL: /home/cltbld/fetches/js
by-shell:
sm:
SHELL: /bin/bash
JSSHELL: /home/cltbld/fetches/js
v8:
SHELL: /bin/bash
JSSHELL: /home/cltbld/fetches/d8/d8
max-run-time: 1800
treeherder:
kind: test
tier: 2
run:
using: mach
using: run-task
workdir: /home/cltbld
command: >
cd $GECKO_PATH &&
./mach jsshell-bench --perfherder={shell} --binary=$JSSHELL {test}
run-on-projects: ['mozilla-central', 'try']
fetches:
build:
- target.jsshell.zip
fetch:
- d8
bench-ares6:
description: Ares6 JavaScript shell benchmark suite
shell: ['sm', 'v8']
test: ares6
treeherder:
symbol: js-bench(ares6)
run:
mach: jsshell-bench --binary $JSSHELL --perfherder ares6
symbol: ares6
bench-sixspeed:
description: Six-Speed JavaScript shell benchmark suite
shell: ['sm', 'v8']
test: six-speed
treeherder:
symbol: js-bench(6speed)
run:
mach: jsshell-bench --binary $JSSHELL --perfherder six-speed
symbol: 6speed
bench-sunspider:
description: SunSpider JavaScript shell benchmark suite
shell: ['sm']
test: sunspider
treeherder:
symbol: js-bench(sunspider)
run:
mach: jsshell-bench --binary $JSSHELL --perfherder sunspider
symbol: sunspider
bench-web-tooling:
description: Web Tooling shell benchmark suite
shell: ['sm', 'v8']
test: web-tooling-benchmark
treeherder:
symbol: js-bench(webtool)
run:
mach: jsshell-bench --binary $JSSHELL --perfherder web-tooling-benchmark
symbol: webtool
fetches:
fetch:
- web-tooling-benchmark

View file

@ -128,6 +128,36 @@ def split_python(config, jobs):
yield pyjob
@transforms.add
def split_jsshell(config, jobs):
    """Split each jsshell benchmark job into one job per requested shell.

    A job may declare a ``shell`` list naming a subset of ``all_shells``;
    jobs that omit it are duplicated for every known shell. Each generated
    job gets a shell-specific name, description and treeherder group, and
    has the ``{shell}``/``{SHELL}``/``{test}`` placeholders in its run
    command substituted.
    """
    all_shells = {
        'sm': "Spidermonkey",
        'v8': "Google V8",
    }
    for job in jobs:
        # Only jsshell benchmark tasks get split; pass everything else through.
        if not job['name'].startswith('jsshell'):
            yield job
            continue

        test = job.pop('test')
        for shell in job.get('shell', all_shells.keys()):
            # Fail loudly on a typo rather than generating a task for an
            # unknown shell. An explicit raise is used instead of `assert`,
            # which would be silently stripped under `python -O`.
            if shell not in all_shells:
                raise ValueError(
                    "unknown shell '{}' in job '{}'".format(shell, job['name']))

            new_job = copy.deepcopy(job)
            new_job['name'] = '{}-{}'.format(new_job['name'], shell)
            new_job['description'] = '{} on {}'.format(
                new_job['description'], all_shells[shell])
            new_job['shell'] = shell

            # Re-group the treeherder symbol under a per-shell group,
            # e.g. 'ares6' -> 'js-bench-sm(ares6)'.
            group = 'js-bench-{}'.format(shell)
            symbol = split_symbol(new_job['treeherder']['symbol'])[1]
            new_job['treeherder']['symbol'] = join_symbol(group, symbol)

            run = new_job['run']
            run['command'] = run['command'].format(
                shell=shell, SHELL=shell.upper(), test=test)
            yield new_job
def add_build_dependency(config, job):
"""
Add build dependency to the job and installer_url to env.
@ -172,3 +202,25 @@ def handle_platform(config, jobs):
del job['platform']
yield job
@transforms.add
def handle_shell(config, jobs):
    """Resolve 'by-shell' keyed fields on each job, then drop the
    'shell' key so downstream transforms never see it."""
    keyed_fields = (
        'run-on-projects',
        'worker.env',
    )
    for job in jobs:
        shell = job.get('shell')
        if not shell:
            # Nothing to resolve for jobs without a shell.
            yield job
            continue

        for field in keyed_fields:
            resolve_keyed_by(job, field, item_name=job['name'])

        del job['shell']
        yield job

View file

@ -31,9 +31,10 @@ class Benchmark(object):
should_alert = False
units = 'score'
def __init__(self, shell, args=None):
def __init__(self, shell, args=None, shell_name=None):
    """Initialize a benchmark runner.

    :param shell: path to the JS shell binary to execute.
    :param args: optional extra arguments to pass to the JS shell.
    :param shell_name: optional label; when set, it is appended to the
        benchmark's perfherder suite name so results from different
        shells can be told apart.
    """
    self.shell = shell
    self.args = args
    self.shell_name = shell_name
@abstractproperty
def name(self):
@ -70,6 +71,10 @@ class Benchmark(object):
def reset(self):
"""Resets state between runs."""
name = self.name
if self.shell_name:
name = '{}-{}'.format(name, self.shell_name)
self.perfherder_data = {
'framework': {
'name': 'js-bench',
@ -77,7 +82,7 @@ class Benchmark(object):
'suites': [
{
'lowerIsBetter': self.lower_is_better,
'name': self.name,
'name': name,
'shouldAlert': self.should_alert,
'subtests': [],
'units': self.units,
@ -266,8 +271,11 @@ class WebToolingBenchmark(Benchmark):
if score_name == 'mean':
bench_mean = mean
self.suite['value'] = bench_mean
def _provision_benchmark_script(self):
if os.path.isdir(self.path):
return
# Some benchmarks may have been downloaded from a fetch task, make
# sure they get copied over.
fetches_dir = os.environ.get('MOZ_FETCHES_DIR')
@ -275,7 +283,7 @@ class WebToolingBenchmark(Benchmark):
webtool_fetchdir = os.path.join(fetches_dir, 'web-tooling-benchmark')
if os.path.isdir(webtool_fetchdir):
shutil.copytree(webtool_fetchdir, self.path)
def run(self):
self._provision_benchmark_script()
return super(WebToolingBenchmark, self).run()
@ -289,7 +297,7 @@ all_benchmarks = {
}
def run(benchmark, binary=None, extra_args=None, perfherder=False):
def run(benchmark, binary=None, extra_args=None, perfherder=None):
if not binary:
try:
binary = os.path.join(build.bindir, 'js' + build.substs['BIN_SUFFIX'])
@ -300,7 +308,7 @@ def run(benchmark, binary=None, extra_args=None, perfherder=False):
print(JSSHELL_NOT_FOUND)
return 1
bench = all_benchmarks.get(benchmark)(binary, args=extra_args)
bench = all_benchmarks.get(benchmark)(binary, args=extra_args, shell_name=perfherder)
res = bench.run()
if perfherder:
@ -316,8 +324,8 @@ def get_parser():
help="Path to the JS shell binary to use.")
parser.add_argument('--arg', dest='extra_args', action='append', default=None,
help="Extra arguments to pass to the JS shell.")
parser.add_argument('--perfherder', action='store_true', default=False,
help="Log PERFHERDER_DATA to stdout.")
parser.add_argument('--perfherder', default=None,
help="Log PERFHERDER_DATA to stdout using the given suite name.")
return parser