[test] Switch to flattened json output
This flattens the json output to a single result record represented as a dict.
In the past, several records with different arch/mode combinations could be
produced in one run, but that has been deprecated for several releases.

We also stop storing the arch/mode information in the record, as it isn't
used for anything on the infra side.
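
For illustration, the record shape changes roughly as follows (field names as
used by the test runner; the values are placeholders):

  # Old format: a list with one record per arch/mode combination (deprecated).
  old_output = [
    {
      "arch": "x64",
      "mode": "release",
      "results": [],
      "slowest_tests": [],
      "duration_mean": 1,
      "test_total": 1,
    },
  ]

  # New format: a single flattened record; arch/mode are dropped.
  new_output = {
    "results": [],
    "slowest_tests": [],
    "duration_mean": 1,
    "test_total": 1,
  }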

This was prepared on the infra side by:
https://crrev.com/c/2453562
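
A hypothetical reader-side sketch (not taken from that CL) that tolerates both
the legacy list-of-records format and the new flat dict; the file name below is
made up and stands for whatever --json-test-results points at:

  import json

  # Hypothetical results path; the runner writes to the --json-test-results file.
  with open("test_results.json") as f:
    data = json.load(f)

  # Legacy output was a list containing one record; new output is the record itself.
  record = data[0] if isinstance(data, list) else data
  print(record["test_total"], record["duration_mean"])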

Bug: chromium:1132088
Change-Id: I944514dc00a671e7671bcdbcaa3a72407476d7ad
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2456987
Reviewed-by: Liviu Rau <[email protected]>
Commit-Queue: Michael Achenbach <[email protected]>
Cr-Commit-Position: refs/heads/master@{#70402}
mi-ac authored and Commit Bot committed Oct 8, 2020
1 parent 0ce4c51 commit 373a9a8
Showing 6 changed files with 8 additions and 33 deletions.
13 changes: 2 additions & 11 deletions tools/testrunner/base_runner.py
@@ -117,7 +117,7 @@
 
 
 ModeConfig = namedtuple(
-    'ModeConfig', 'label flags timeout_scalefactor status_mode execution_mode')
+    'ModeConfig', 'label flags timeout_scalefactor status_mode')
 
 DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
 RELEASE_FLAGS = ["--nohard-abort"]
@@ -127,15 +127,13 @@
     flags=DEBUG_FLAGS,
     timeout_scalefactor=4,
     status_mode="debug",
-    execution_mode="debug",
 )
 
 RELEASE_MODE = ModeConfig(
     label='release',
     flags=RELEASE_FLAGS,
     timeout_scalefactor=1,
     status_mode="release",
-    execution_mode="release",
 )
 
 # Normal trybot release configuration. There, dchecks are always on which
@@ -146,7 +144,6 @@
     flags=RELEASE_FLAGS,
     timeout_scalefactor=4,
     status_mode="debug",
-    execution_mode="release",
 )
 
 PROGRESS_INDICATORS = {
@@ -761,13 +758,7 @@ def _get_shard_info(self, options):
   def _create_progress_indicators(self, test_count, options):
     procs = [PROGRESS_INDICATORS[options.progress]()]
     if options.json_test_results:
-      # TODO(machenbach): Deprecate the execution mode. Previously it was meant
-      # to differentiate several records in the json output. But there is now
-      # only one record and the mode information is redundant.
-      procs.append(progress.JsonTestProgressIndicator(
-        self.framework_name,
-        self.build_config.arch,
-        self.mode_options.execution_mode))
+      procs.append(progress.JsonTestProgressIndicator(self.framework_name))
 
     for proc in procs:
       proc.configure(options)
4 changes: 1 addition & 3 deletions tools/testrunner/standard_runner.py
@@ -364,10 +364,8 @@ def _duration_results_text(test):
     ]
 
   assert os.path.exists(options.json_test_results)
-  complete_results = []
   with open(options.json_test_results, "r") as f:
-    complete_results = json.loads(f.read())
-  output = complete_results[0]
+    output = json.load(f)
   lines = []
   for test in output['slowest_tests']:
     suffix = ''
18 changes: 4 additions & 14 deletions tools/testrunner/testproc/progress.py
@@ -349,7 +349,7 @@ def _clear_line(self, last_length):
 
 
 class JsonTestProgressIndicator(ProgressIndicator):
-  def __init__(self, framework_name, arch, mode):
+  def __init__(self, framework_name):
     super(JsonTestProgressIndicator, self).__init__()
     # We want to drop stdout/err for all passed tests on the first try, but we
     # need to get outputs for all runs after the first one. To accommodate that,
@@ -358,8 +358,6 @@ def __init__(self, framework_name, arch, mode):
     self._requirement = base.DROP_PASS_STDOUT
 
     self.framework_name = framework_name
-    self.arch = arch
-    self.mode = mode
     self.results = []
     self.duration_sum = 0
     self.test_count = 0
@@ -429,24 +427,16 @@ def _test_record(self, test, result, output, run):
     }
 
   def finished(self):
-    complete_results = []
-    if os.path.exists(self.options.json_test_results):
-      with open(self.options.json_test_results, "r") as f:
-        # On bots we might start out with an empty file.
-        complete_results = json.loads(f.read() or "[]")
-
     duration_mean = None
     if self.test_count:
       duration_mean = self.duration_sum / self.test_count
 
-    complete_results.append({
-      "arch": self.arch,
-      "mode": self.mode,
+    result = {
       "results": self.results,
       "slowest_tests": self.tests.as_list(),
       "duration_mean": duration_mean,
       "test_total": self.test_count,
-    })
+    }
 
     with open(self.options.json_test_results, "w") as f:
-      f.write(json.dumps(complete_results))
+      json.dump(result, f)
2 changes: 1 addition & 1 deletion tools/unittests/run_tests_test.py
@@ -246,7 +246,7 @@ def check_cleaned_json_output(
       self, expected_results_name, actual_json, basedir):
     # Check relevant properties of the json output.
     with open(actual_json) as f:
-      json_output = json.load(f)[0]
+      json_output = json.load(f)
 
     # Replace duration in actual output as it's non-deterministic. Also
     # replace the python executable prefix as it has a different absolute
2 changes: 0 additions & 2 deletions tools/unittests/testdata/expected_test_results1.json
@@ -1,7 +1,5 @@
 {
-  "arch": "x64",
   "duration_mean": 1,
-  "mode": "release",
   "results": [
     {
       "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
2 changes: 0 additions & 2 deletions tools/unittests/testdata/expected_test_results2.json
@@ -1,7 +1,5 @@
 {
-  "arch": "x64",
   "duration_mean": 1,
-  "mode": "release",
   "results": [
     {
       "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
