ci: speed up unit-test execution (#2586)

Co-authored-by: water111 <48171810+water111@users.noreply.github.com>
Author: Tyler Wilding, 2023-04-30 13:27:08 -05:00 (committed by GitHub)
parent cc878c1055
commit 728ef59477
11 changed files with 2084 additions and 13 deletions


@@ -46,22 +46,19 @@ jobs:
CXX: g++
run: |
cmake -B build --preset=${{ inputs.cmakePreset }} \
- -DCODE_COVERAGE=ON \
+ -DCODE_COVERAGE=OFF \
-DCMAKE_C_COMPILER_LAUNCHER=${{ github.workspace }}/buildcache/bin/buildcache \
-DCMAKE_CXX_COMPILER_LAUNCHER=${{ github.workspace }}/buildcache/bin/buildcache
- name: Build Project
run: cmake --build build --parallel $((`nproc`)) -- -w dupbuild=warn
- - name: Run Tests - With Coverage
- working-directory: ./build
- env:
- GTEST_OUTPUT: "xml:opengoal-test-report.xml"
- run: ninja goalc-test_coverage -w dupbuild=warn
+ - name: Run Tests
+ run: ./test.sh
- - name: Submit Coverage Report to Codacy
- uses: codacy/codacy-coverage-reporter-action@v1
- continue-on-error: true
- with:
- project-token: ${{ secrets.CODACY_PROJECT_KEY }}
- coverage-reports: ./build/goalc-test_coverage.info
+ # - name: Submit Coverage Report to Codacy
+ # uses: codacy/codacy-coverage-reporter-action@v1
+ # continue-on-error: true
+ # with:
+ # project-token: ${{ secrets.CODACY_PROJECT_KEY }}
+ # coverage-reports: ./build/goalc-test_coverage.info
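
The coverage-instrumented `ninja goalc-test_coverage` step and the Codacy upload are replaced by a single step that runs `./test.sh`; the script itself is not shown in this diff. As a rough, hypothetical illustration only (the test binary path `build/bin/goalc-test` and the worker count are assumptions, not taken from this commit), an equivalent invocation of the vendored runner from Python could look like:

    # Hypothetical stand-in for the new "Run Tests" step, not the actual test.sh.
    # The gtest binary location and worker count below are assumptions.
    import multiprocessing
    import subprocess
    import sys

    cmd = [
        sys.executable,
        "third-party/gtest-parallel/gtest-parallel",
        "build/bin/goalc-test",  # assumed location of the gtest binary
        "--workers={}".format(multiprocessing.cpu_count()),
    ]
    sys.exit(subprocess.call(cmd))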


@@ -149,7 +149,7 @@
"description": "Build with Clang as Release without Debug Symbols and ASAN Fuzzing",
"inherits": ["base-linux-release", "base-clang"],
"cacheVariables": {
"ASAN_BUILD": "ON"
"ASAN_BUILD": "OFF"
}
},
{

third-party/gtest-parallel/.gitignore (new file, generated/vendored, 2 lines added)

@@ -0,0 +1,2 @@
.*.swp
*.py[co]

third-party/gtest-parallel/.style.yapf (new file, generated/vendored, 4 lines added)

@@ -0,0 +1,4 @@
[style]
based_on_style = pep8
indent_width = 2
column_limit = 80

third-party/gtest-parallel/CONTRIBUTING.md (new file, generated/vendored, 28 lines added)

@@ -0,0 +1,28 @@
# How to Contribute
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
## Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## Community Guidelines
This project follows [Google's Open Source Community
Guidelines](https://opensource.google.com/conduct/).

third-party/gtest-parallel/LICENSE (new file, generated/vendored, 202 lines added)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third-party/gtest-parallel/README.md (new file, generated/vendored, 90 lines added)

@@ -0,0 +1,90 @@
# gtest-parallel
_This is not an official Google product._
`gtest-parallel` is a script that executes [Google
Test](https://github.com/google/googletest) binaries in parallel, providing good
speedup for single-threaded tests (on multi-core machines) and tests that do not
run at 100% CPU (on single- or multi-core machines).
The script works by listing the tests of each binary, and then executing them on
workers in separate processes. This works fine so long as the tests are
self-contained and do not share resources (reading data is fine, writing to the same
log file is probably not).
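As a minimal sketch of that flow (a simplified illustration, not the vendored implementation; `path/to/binary` is a placeholder, and retries, sharding and log handling are omitted):

    # List tests with --gtest_list_tests, then run each one in its own process
    # via --gtest_filter, fanning the runs out over a pool of workers.
    import multiprocessing
    import subprocess

    def list_tests(binary):
        out = subprocess.check_output([binary, "--gtest_list_tests"], text=True)
        tests, group = [], ""
        for line in out.splitlines():
            line = line.split("#")[0].rstrip()      # drop comments for typed tests
            if not line.strip():
                continue
            if not line.startswith(" "):
                group = line.strip()                 # e.g. "Foo."
            else:
                tests.append(group + line.strip())   # e.g. "Foo.Bar"
        return tests

    def run_one(args):
        binary, name = args
        return name, subprocess.call([binary, "--gtest_filter=" + name])

    if __name__ == "__main__":
        binary = "path/to/binary"                    # placeholder
        jobs = [(binary, t) for t in list_tests(binary)]
        with multiprocessing.Pool() as pool:
            for name, code in pool.imap_unordered(run_one, jobs):
                print(name, "OK" if code == 0 else "FAILED")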
## Basic Usage
_For a full list of options, see `--help`._
$ ./gtest-parallel path/to/binary...
This shards all enabled tests across a number of workers, defaulting to the
number of cores in the system. If your system uses Python 2, but you have no
python2 binary, run `python gtest-parallel` instead of `./gtest-parallel`.
To run only a select set of tests, run:
$ ./gtest-parallel path/to/binary... --gtest_filter=Foo.*:Bar.*
This filter takes the same parameters as Google Test, so -Foo.\* can be used for
test exclusion as well. This is especially useful for slow tests (that you're
not working on), or tests that may not be able to run in parallel.
## Flakiness
Flaky tests (tests that do not deterministically pass or fail) often cause a lot
of developer headache. A test that fails only 1% of the time can be very hard to
detect as flaky, and even harder to convince yourself of having fixed.
`gtest-parallel` supports repeating individual tests (`--repeat=`), which can be
very useful for flakiness testing. Some tests are also more flaky under high
loads (especially tests that use realtime clocks), so raising the number of
`--workers=` well above the number of available cores can often cause contention
and be fruitful for detecting flaky tests as well.
$ ./gtest-parallel out/{binary1,binary2,binary3} --repeat=1000 --workers=128
The above command repeats all tests inside `binary1`, `binary2` and `binary3`
located in `out/`. The tests are run `1000` times each on `128` workers (this is
more than I have cores on my machine anyway). This can often be done and then
left overnight if you have no initial guess as to which tests are flaky and which
ones aren't. When you've figured out which tests are flaky (and want to fix
them), repeat the above command with `--gtest_filter=` to only retry the flaky
tests that you are fixing.
Note that repeated tests do run concurrently with themselves for efficiency, and
as such they have problems writing to hard-coded files, even if they are only
used by that single test. `tmpfile()` and similar library functions are often
your friends here.
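For example, a test that would otherwise write to a hard-coded path can claim its own temporary file per invocation (a generic illustration, not a test from this repository):

    # Generic illustration: each concurrent invocation gets a private temp file
    # instead of a shared hard-coded path, so repeated runs cannot clobber each other.
    import os
    import tempfile
    import unittest

    class WritesOutput(unittest.TestCase):
        def test_writes_log(self):
            fd, path = tempfile.mkstemp(prefix="writes_output_", suffix=".log")
            try:
                with os.fdopen(fd, "w") as f:
                    f.write("result: ok\n")
                with open(path) as f:
                    self.assertIn("ok", f.read())
            finally:
                os.remove(path)

    if __name__ == "__main__":
        unittest.main()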
### Flakiness Summaries
Especially for disabled tests, you might wonder how stable a test seems before
trying to enable it. `gtest-parallel` prints summaries (number of passed/failed
tests) when `--repeat=` is used and at least one test fails. This can be used to
generate passed/failed statistics per test. If no statistics are generated, then
all test invocations passed, congratulations!
For example, to try all disabled tests and see how stable they are:
$ ./gtest-parallel path/to/binary... -r1000 --gtest_filter=*.DISABLED_* --gtest_also_run_disabled_tests
This will produce something like the following at the end of the run:
SUMMARY:
path/to/binary... Foo.DISABLED_Bar passed 0 / 1000 times.
path/to/binary... FooBar.DISABLED_Baz passed 30 / 1000 times.
path/to/binary... Foo.DISABLED_Baz passed 1000 / 1000 times.
## Running Tests Within Test Cases Sequentially
Sometimes tests within a single test case use globally-shared resources
(hard-coded file paths, sockets, etc.) and cannot be run in parallel. Running
such tests in parallel will either fail or be flaky (if they happen to not
overlap during execution, they pass). So long as these resources are only shared
within the same test case, `gtest-parallel` can still provide some parallelism.
For such binaries where test cases are independent, `gtest-parallel` provides
`--serialize_test_cases` that runs tests within the same test case sequentially.
While generally not providing as much speedup as fully parallel test execution,
this permits such binaries to partially benefit from parallel execution.
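Under the hood, this serialization keys off the test case prefix of each test name (the part before the first dot). A tiny sketch of that grouping, independent of the runner itself:

    # Group test names by their test case; --serialize_test_cases keeps each
    # such group sequential while different groups still run in parallel.
    from collections import defaultdict

    tests = ["Net.SendsPacket", "Net.ReceivesPacket", "Math.Adds"]  # example names
    by_case = defaultdict(list)
    for name in tests:
        by_case[name.split(".")[0]].append(name)
    print(dict(by_case))  # {'Net': [...two tests...], 'Math': ['Math.Adds']}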

third-party/gtest-parallel/gtest-parallel (new file, generated/vendored, 18 lines added)

@@ -0,0 +1,18 @@
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gtest_parallel
import sys
sys.exit(gtest_parallel.main())

third-party/gtest-parallel/gtest_parallel.py (new file, generated/vendored, 906 lines added)

@@ -0,0 +1,906 @@
# Copyright 2013 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
from functools import total_ordering
import gzip
import io
import json
import multiprocessing
import optparse
import os
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
if sys.version_info.major >= 3:
long = int
import _pickle as cPickle
import _thread as thread
else:
import cPickle
import thread
from pickle import HIGHEST_PROTOCOL as PICKLE_HIGHEST_PROTOCOL
if sys.platform == 'win32':
import msvcrt
else:
import fcntl
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate() and raise ProcessWasInterrupted.
class SigintHandler(object):
class ProcessWasInterrupted(Exception):
pass
sigint_returncodes = {
-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
signal.signal(signal.SIGINT, lambda signal_num, frame: self.interrupt())
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self):
with self.__lock:
self.__on_sigint()
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
code = p.wait()
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
if self.__got_sigint:
raise self.ProcessWasInterrupted
return code
sigint_handler = SigintHandler()
# Return the width of the terminal, or None if it couldn't be
# determined (e.g. because we're not being run interactively).
def term_width(out):
if not out.isatty():
return None
try:
p = subprocess.Popen(["stty", "size"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
if p.returncode != 0 or err:
return None
return int(out.split()[1])
except (IndexError, OSError, ValueError):
return None
# Output transient and permanent lines of text. If several transient
# lines are written in sequence, the new will overwrite the old. We
# use this to ensure that lots of unimportant info (tests passing)
# won't drown out important info (tests failing).
class Outputter(object):
def __init__(self, out_file):
self.__out_file = out_file
self.__previous_line_was_transient = False
self.__width = term_width(out_file) # Line width, or None if not a tty.
def transient_line(self, msg):
if self.__width is None:
self.__out_file.write(msg + "\n")
self.__out_file.flush()
else:
self.__out_file.write("\r" + msg[:self.__width].ljust(self.__width))
self.__previous_line_was_transient = True
def flush_transient_output(self):
if self.__previous_line_was_transient:
self.__out_file.write("\n")
self.__previous_line_was_transient = False
def permanent_line(self, msg):
self.flush_transient_output()
self.__out_file.write(msg + "\n")
if self.__width is None:
self.__out_file.flush()
def get_save_file_path():
"""Return path to file for saving transient data."""
if sys.platform == 'win32':
default_cache_path = os.path.join(os.path.expanduser('~'), 'AppData',
'Local')
cache_path = os.environ.get('LOCALAPPDATA', default_cache_path)
else:
# We don't use xdg module since it's not a standard.
default_cache_path = os.path.join(os.path.expanduser('~'), '.cache')
cache_path = os.environ.get('XDG_CACHE_HOME', default_cache_path)
if os.path.isdir(cache_path):
return os.path.join(cache_path, 'gtest-parallel')
else:
sys.stderr.write('Directory {} does not exist'.format(cache_path))
return os.path.join(os.path.expanduser('~'), '.gtest-parallel-times')
@total_ordering
class Task(object):
"""Stores information about a task (single execution of a test).
This class stores information about the test to be executed (gtest binary and
test name), and its result (log file, exit code and runtime).
Each task is uniquely identified by the gtest binary, the test name and an
execution number that increases each time the test is executed.
Additionally, we store the last execution time, so that next time the test is
executed, the slowest tests are run first.
"""
def __init__(self, test_binary, test_name, test_command, execution_number,
last_execution_time, output_dir):
self.test_name = test_name
self.output_dir = output_dir
self.test_binary = test_binary
self.test_command = test_command
self.execution_number = execution_number
self.last_execution_time = last_execution_time
self.exit_code = None
self.runtime_ms = None
self.test_id = (test_binary, test_name)
self.task_id = (test_binary, test_name, self.execution_number)
self.log_file = Task._logname(self.output_dir, self.test_binary, test_name,
self.execution_number)
def __sorting_key(self):
# Unseen or failing tests (both missing execution time) take precedence over
# execution time. Tests are greater (seen as slower) when missing times so
# that they are executed first.
return (1 if self.last_execution_time is None else 0,
self.last_execution_time)
def __eq__(self, other):
return self.__sorting_key() == other.__sorting_key()
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.__sorting_key() < other.__sorting_key()
@staticmethod
def _normalize(string):
return re.sub('[^A-Za-z0-9]', '_', string)
@staticmethod
def _logname(output_dir, test_binary, test_name, execution_number):
# Store logs to temporary files if there is no output_dir.
if output_dir is None:
(log_handle, log_name) = tempfile.mkstemp(prefix='gtest_parallel_',
suffix=".log")
os.close(log_handle)
return log_name
log_name = '%s-%s-%d.log' % (Task._normalize(os.path.basename(test_binary)),
Task._normalize(test_name), execution_number)
return os.path.join(output_dir, log_name)
def run(self):
begin = time.time()
with open(self.log_file, 'w') as log:
task = subprocess.Popen(self.test_command, stdout=log, stderr=log)
try:
self.exit_code = sigint_handler.wait(task)
except sigint_handler.ProcessWasInterrupted:
thread.exit()
self.runtime_ms = int(1000 * (time.time() - begin))
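# A failing run records None here so that __sorting_key() treats the test as
# unseen/slowest and schedules it first on the next run.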
self.last_execution_time = None if self.exit_code else self.runtime_ms
class TaskManager(object):
"""Executes the tasks and stores the passed, failed and interrupted tasks.
When a task is run, this class keeps track if it passed, failed or was
interrupted. After a task finishes it calls the relevant functions of the
Logger, TestResults and TestTimes classes, and in case of failure, retries the
test as specified by the --retry_failed flag.
"""
def __init__(self, times, logger, test_results, task_factory, times_to_retry,
initial_execution_number):
self.times = times
self.logger = logger
self.test_results = test_results
self.task_factory = task_factory
self.times_to_retry = times_to_retry
self.initial_execution_number = initial_execution_number
self.global_exit_code = 0
self.passed = []
self.failed = []
self.started = {}
self.execution_number = {}
self.lock = threading.Lock()
def __get_next_execution_number(self, test_id):
with self.lock:
next_execution_number = self.execution_number.setdefault(
test_id, self.initial_execution_number)
self.execution_number[test_id] += 1
return next_execution_number
def __register_start(self, task):
with self.lock:
self.started[task.task_id] = task
def register_exit(self, task):
self.logger.log_exit(task)
self.times.record_test_time(task.test_binary, task.test_name,
task.last_execution_time)
if self.test_results:
self.test_results.log(task.test_name, task.runtime_ms / 1000.0,
task.exit_code)
with self.lock:
self.started.pop(task.task_id)
if task.exit_code == 0:
self.passed.append(task)
else:
self.failed.append(task)
def run_task(self, task):
for try_number in range(self.times_to_retry + 1):
self.__register_start(task)
task.run()
self.register_exit(task)
if task.exit_code == 0:
break
if try_number < self.times_to_retry:
execution_number = self.__get_next_execution_number(task.test_id)
# We need to create a new Task instance. Each task represents a single test
# execution, with its own runtime, exit code and log file.
task = self.task_factory(task.test_binary, task.test_name,
task.test_command, execution_number,
task.last_execution_time, task.output_dir)
with self.lock:
if task.exit_code != 0:
self.global_exit_code = task.exit_code
class FilterFormat(object):
def __init__(self, output_dir):
if sys.stdout.isatty():
# stdout needs to be unbuffered since the output is interactive.
if isinstance(sys.stdout, io.TextIOWrapper):
# workaround for https://bugs.python.org/issue17404
sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
line_buffering=True,
write_through=True,
newline='\n')
else:
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
self.output_dir = output_dir
self.total_tasks = 0
self.finished_tasks = 0
self.out = Outputter(sys.stdout)
self.stdout_lock = threading.Lock()
def move_to(self, destination_dir, tasks):
if self.output_dir is None:
return
destination_dir = os.path.join(self.output_dir, destination_dir)
os.makedirs(destination_dir)
for task in tasks:
shutil.move(task.log_file, destination_dir)
def print_tests(self, message, tasks, print_try_number):
self.out.permanent_line("%s (%s/%s):" %
(message, len(tasks), self.total_tasks))
for task in sorted(tasks):
runtime_ms = 'Interrupted'
if task.runtime_ms is not None:
runtime_ms = '%d ms' % task.runtime_ms
self.out.permanent_line(
"%11s: %s %s%s" %
(runtime_ms, task.test_binary, task.test_name,
(" (try #%d)" % task.execution_number) if print_try_number else ""))
def log_exit(self, task):
with self.stdout_lock:
self.finished_tasks += 1
self.out.transient_line("[%d/%d] %s (%d ms)" %
(self.finished_tasks, self.total_tasks,
task.test_name, task.runtime_ms))
if task.exit_code != 0:
with open(task.log_file) as f:
for line in f.readlines():
self.out.permanent_line(line.rstrip())
if task.exit_code is None:
self.out.permanent_line("[%d/%d] %s aborted after %d ms" %
(self.finished_tasks, self.total_tasks,
task.test_name, task.runtime_ms))
else:
self.out.permanent_line(
"[%d/%d] %s returned with exit code %d (%d ms)" %
(self.finished_tasks, self.total_tasks, task.test_name,
task.exit_code, task.runtime_ms))
if self.output_dir is None:
# Try to remove the file 100 times (sleeping for 0.1 second in between).
# This is a workaround for a process handle seemingly holding on to the
# file for too long inside os.subprocess. This workaround is in place
# until we figure out a minimal repro to report upstream (or a better
# suspect) to prevent os.remove exceptions.
num_tries = 100
for i in range(num_tries):
try:
os.remove(task.log_file)
except OSError as e:
if e.errno != errno.ENOENT:
if i == num_tries - 1:
self.out.permanent_line('Could not remove temporary log file: ' +
str(e))
else:
time.sleep(0.1)
continue
break
def log_tasks(self, total_tasks):
self.total_tasks += total_tasks
self.out.transient_line("[0/%d] Running tests..." % self.total_tasks)
def summarize(self, passed_tasks, failed_tasks, interrupted_tasks):
stats = {}
def add_stats(stats, task, idx):
task_key = (task.test_binary, task.test_name)
if not task_key in stats:
# Stats are [num_passed, num_failed, num_interrupted]; task_key is appended as
# a tie breaker to get alphabetic sorting for equally-stable tests.
stats[task_key] = [0, 0, 0, task_key]
stats[task_key][idx] += 1
for task in passed_tasks:
add_stats(stats, task, 0)
for task in failed_tasks:
add_stats(stats, task, 1)
for task in interrupted_tasks:
add_stats(stats, task, 2)
self.out.permanent_line("SUMMARY:")
for task_key in sorted(stats, key=stats.__getitem__):
(num_passed, num_failed, num_interrupted, _) = stats[task_key]
(test_binary, task_name) = task_key
total_runs = num_passed + num_failed + num_interrupted
if num_passed == total_runs:
continue
self.out.permanent_line(" %s %s passed %d / %d times%s." %
(test_binary, task_name, num_passed, total_runs,
"" if num_interrupted == 0 else
(" (%d interrupted)" % num_interrupted)))
def flush(self):
self.out.flush_transient_output()
class CollectTestResults(object):
def __init__(self, json_dump_filepath):
self.test_results_lock = threading.Lock()
self.json_dump_file = open(json_dump_filepath, 'w')
self.test_results = {
"interrupted": False,
"path_delimiter": ".",
# Third version of the file format. See the link in the flag description
# for details.
"version": 3,
"seconds_since_epoch": int(time.time()),
"num_failures_by_type": {
"PASS": 0,
"FAIL": 0,
"TIMEOUT": 0,
},
"tests": {},
}
def log(self, test, runtime_seconds, exit_code):
if exit_code is None:
actual_result = "TIMEOUT"
elif exit_code == 0:
actual_result = "PASS"
else:
actual_result = "FAIL"
with self.test_results_lock:
self.test_results['num_failures_by_type'][actual_result] += 1
results = self.test_results['tests']
for name in test.split('.'):
results = results.setdefault(name, {})
if results:
results['actual'] += ' ' + actual_result
results['times'].append(runtime_seconds)
else: # This is the first invocation of the test
results['actual'] = actual_result
results['times'] = [runtime_seconds]
results['time'] = runtime_seconds
results['expected'] = 'PASS'
def dump_to_file_and_close(self):
json.dump(self.test_results, self.json_dump_file)
self.json_dump_file.close()
# Record of test runtimes. Has built-in locking.
class TestTimes(object):
class LockedFile(object):
def __init__(self, filename, mode):
self._filename = filename
self._mode = mode
self._fo = None
def __enter__(self):
self._fo = open(self._filename, self._mode)
# Regardless of the opening mode, we always seek to the beginning of the file.
# This simplifies code working with LockedFile and also ensures that we
# always lock (and unlock below) the same region of the file on win32.
self._fo.seek(0)
try:
if sys.platform == 'win32':
# We lock a fixed location in the file to use it as
# an exclusive lock on the entire file.
msvcrt.locking(self._fo.fileno(), msvcrt.LK_LOCK, 1)
else:
fcntl.flock(self._fo.fileno(), fcntl.LOCK_EX)
except IOError:
self._fo.close()
raise
return self._fo
def __exit__(self, exc_type, exc_value, traceback):
# Flush any buffered data to disk. This is needed to prevent a race
# condition between the moment the file lock is released and the moment
# the file is closed.
self._fo.flush()
try:
if sys.platform == 'win32':
self._fo.seek(0)
msvcrt.locking(self._fo.fileno(), msvcrt.LK_UNLCK, 1)
else:
fcntl.flock(self._fo.fileno(), fcntl.LOCK_UN)
finally:
self._fo.close()
return exc_value is None
def __init__(self, save_file):
"Create new object seeded with saved test times from the given file."
self.__times = {} # (test binary, test name) -> runtime in ms
# Protects calls to record_test_time(); other calls are not
# expected to be made concurrently.
self.__lock = threading.Lock()
try:
with TestTimes.LockedFile(save_file, 'rb') as fd:
times = TestTimes.__read_test_times_file(fd)
except IOError:
# We couldn't obtain the lock.
return
# Discard saved times if the format isn't right.
if type(times) is not dict:
return
for ((test_binary, test_name), runtime) in times.items():
if (type(test_binary) is not str or type(test_name) is not str
or type(runtime) not in {int, long, type(None)}):
return
self.__times = times
def get_test_time(self, binary, testname):
"""Return the last duration for the given test as an integer number of
milliseconds, or None if the test failed or if there's no record for it."""
return self.__times.get((binary, testname), None)
def record_test_time(self, binary, testname, runtime_ms):
"""Record that the given test ran in the specified number of
milliseconds. If the test failed, runtime_ms should be None."""
with self.__lock:
self.__times[(binary, testname)] = runtime_ms
def write_to_file(self, save_file):
"Write all the times to file."
try:
with TestTimes.LockedFile(save_file, 'a+b') as fd:
times = TestTimes.__read_test_times_file(fd)
if times is None:
times = self.__times
else:
times.update(self.__times)
# We erase the file's data while still holding the lock on it. This
# way, reading old test times and appending new ones appear atomic
# to an external viewer.
fd.seek(0)
fd.truncate()
with gzip.GzipFile(fileobj=fd, mode='wb') as gzf:
cPickle.dump(times, gzf, PICKLE_HIGHEST_PROTOCOL)
except IOError:
pass # ignore errors---saving the times isn't that important
@staticmethod
def __read_test_times_file(fd):
try:
with gzip.GzipFile(fileobj=fd, mode='rb') as gzf:
times = cPickle.load(gzf)
except Exception:
# File doesn't exist, isn't readable, is malformed---whatever.
# Just ignore it.
return None
else:
return times
def find_tests(binaries, additional_args, options, times):
test_count = 0
tasks = []
for test_binary in binaries:
command = [test_binary] + additional_args
if options.gtest_also_run_disabled_tests:
command += ['--gtest_also_run_disabled_tests']
list_command = command + ['--gtest_list_tests']
if options.gtest_filter != '':
list_command += ['--gtest_filter=' + options.gtest_filter]
try:
test_list = subprocess.check_output(list_command,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
sys.exit("%s: %s\n%s" % (test_binary, str(e), e.output))
try:
test_list = test_list.split('\n')
except TypeError:
# subprocess.check_output() returns bytes in python3
test_list = test_list.decode(sys.stdout.encoding).split('\n')
command += ['--gtest_color=' + options.gtest_color]
test_group = ''
for line in test_list:
if not line.strip():
continue
if line[0] != " ":
# Remove comments for typed tests and strip whitespace.
test_group = line.split('#')[0].strip()
continue
# Remove comments for parameterized tests and strip whitespace.
line = line.split('#')[0].strip()
if not line:
continue
test_name = test_group + line
if not options.gtest_also_run_disabled_tests and 'DISABLED_' in test_name:
continue
# Skip PRE_ tests which are used by Chromium.
if '.PRE_' in test_name:
continue
last_execution_time = times.get_test_time(test_binary, test_name)
if options.failed and last_execution_time is not None:
continue
test_command = command + ['--gtest_filter=' + test_name]
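# Round-robin sharding: shard k keeps every test whose running index i
# satisfies (i - k) % shard_count == 0; e.g. with shard_count=3 and
# shard_index=1, tests 1, 4, 7, ... are run by this shard.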
if (test_count - options.shard_index) % options.shard_count == 0:
for execution_number in range(options.repeat):
tasks.append(
Task(test_binary, test_name, test_command, execution_number + 1,
last_execution_time, options.output_dir))
test_count += 1
# Sort the tasks to run the slowest tests first, so that faster ones can be
# finished in parallel.
return sorted(tasks, reverse=True)
def execute_tasks(tasks, pool_size, task_manager, timeout_seconds,
serialize_test_cases):
class WorkerFn(object):
def __init__(self, tasks, running_groups):
self.tasks = tasks
self.running_groups = running_groups
self.task_lock = threading.Lock()
def __call__(self):
while True:
with self.task_lock:
for task_id in range(len(self.tasks)):
task = self.tasks[task_id]
if self.running_groups is not None:
test_group = task.test_name.split('.')[0]
if test_group in self.running_groups:
# Try to find other non-running test group.
continue
else:
self.running_groups.add(test_group)
del self.tasks[task_id]
break
else:
# Either there are no tasks left, or the number of remaining test
# cases (groups) is less than the number of running threads.
return
task_manager.run_task(task)
if self.running_groups is not None:
with self.task_lock:
self.running_groups.remove(test_group)
def start_daemon(func):
t = threading.Thread(target=func)
t.daemon = True
t.start()
return t
timeout = None
try:
if timeout_seconds:
timeout = threading.Timer(timeout_seconds, sigint_handler.interrupt)
timeout.start()
running_groups = set() if serialize_test_cases else None
worker_fn = WorkerFn(tasks, running_groups)
workers = [start_daemon(worker_fn) for _ in range(pool_size)]
for worker in workers:
worker.join()
finally:
if timeout:
timeout.cancel()
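# Any task still registered as started never completed (e.g. it was cut off
# by the timeout above); record the timeout as its runtime and log its exit
# so that it is not silently dropped.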
for task in list(task_manager.started.values()):
task.runtime_ms = timeout_seconds * 1000
task_manager.register_exit(task)
def default_options_parser():
parser = optparse.OptionParser(
usage='usage: %prog [options] binary [binary ...] -- [additional args]')
parser.add_option('-d',
'--output_dir',
type='string',
default=None,
help='Output directory for test logs. Logs will be '
'available under gtest-parallel-logs/, so '
'--output_dir=/tmp will result in all logs being '
'available under /tmp/gtest-parallel-logs/.')
parser.add_option('-r',
'--repeat',
type='int',
default=1,
help='Number of times to execute all the tests.')
parser.add_option('--retry_failed',
type='int',
default=0,
help='Number of times to repeat failed tests.')
parser.add_option('--failed',
action='store_true',
default=False,
help='run only failed and new tests')
parser.add_option('-w',
'--workers',
type='int',
default=multiprocessing.cpu_count(),
help='number of workers to spawn')
parser.add_option('--gtest_color',
type='string',
default='yes',
help='color output')
parser.add_option('--gtest_filter',
type='string',
default='',
help='test filter')
parser.add_option('--gtest_also_run_disabled_tests',
action='store_true',
default=False,
help='run disabled tests too')
parser.add_option(
'--print_test_times',
action='store_true',
default=False,
help='list the run time of each test at the end of execution')
parser.add_option('--shard_count',
type='int',
default=1,
help='total number of shards (for sharding test execution '
'between multiple machines)')
parser.add_option('--shard_index',
type='int',
default=0,
help='zero-indexed number identifying this shard (for '
'sharding test execution between multiple machines)')
parser.add_option(
'--dump_json_test_results',
type='string',
default=None,
help='Saves the results of the tests as a JSON machine-'
'readable file. The format of the file is specified at '
'https://www.chromium.org/developers/the-json-test-results-format')
parser.add_option('--timeout',
type='int',
default=None,
help='Interrupt all remaining processes after the given '
'time (in seconds).')
parser.add_option('--serialize_test_cases',
action='store_true',
default=False,
help='Do not run tests from the same test '
'case in parallel.')
return parser
def main():
# Remove additional arguments (anything after --).
additional_args = []
for i in range(len(sys.argv)):
if sys.argv[i] == '--':
additional_args = sys.argv[i + 1:]
sys.argv = sys.argv[:i]
break
parser = default_options_parser()
(options, binaries) = parser.parse_args()
if (options.output_dir is not None and not os.path.isdir(options.output_dir)):
parser.error('--output_dir value must be an existing directory, '
'current value is "%s"' % options.output_dir)
# Append gtest-parallel-logs to log output, this is to avoid deleting user
# data if a user passes a directory where files are already present. If a
# user specifies --output_dir=Docs/, we'll create Docs/gtest-parallel-logs
# and clean that directory out on startup, instead of nuking Docs/.
if options.output_dir:
options.output_dir = os.path.join(options.output_dir, 'gtest-parallel-logs')
if binaries == []:
parser.print_usage()
sys.exit(1)
if options.shard_count < 1:
parser.error("Invalid number of shards: %d. Must be at least 1." %
options.shard_count)
if not (0 <= options.shard_index < options.shard_count):
parser.error("Invalid shard index: %d. Must be between 0 and %d "
"(less than the number of shards)." %
(options.shard_index, options.shard_count - 1))
# Check that all test binaries have a unique basename. That way we can ensure
# the logs are saved to unique files even when two different binaries have
# common tests.
unique_binaries = set(os.path.basename(binary) for binary in binaries)
assert len(unique_binaries) == len(binaries), (
"All test binaries must have an unique basename.")
if options.output_dir:
# Remove files from old test runs.
if os.path.isdir(options.output_dir):
shutil.rmtree(options.output_dir)
# Create directory for test log output.
try:
os.makedirs(options.output_dir)
except OSError as e:
# Ignore errors if this directory already exists.
if e.errno != errno.EEXIST or not os.path.isdir(options.output_dir):
raise e
test_results = None
if options.dump_json_test_results is not None:
test_results = CollectTestResults(options.dump_json_test_results)
save_file = get_save_file_path()
times = TestTimes(save_file)
logger = FilterFormat(options.output_dir)
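# Retries get execution numbers starting at options.repeat + 1, i.e. after the
# numbers 1..options.repeat that find_tests() assigns to repeated runs, so
# retry log files never collide with those of the initial runs.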
task_manager = TaskManager(times, logger, test_results, Task,
options.retry_failed, options.repeat + 1)
tasks = find_tests(binaries, additional_args, options, times)
logger.log_tasks(len(tasks))
execute_tasks(tasks, options.workers, task_manager, options.timeout,
options.serialize_test_cases)
print_try_number = options.retry_failed > 0 or options.repeat > 1
if task_manager.passed:
logger.move_to('passed', task_manager.passed)
if options.print_test_times:
logger.print_tests('PASSED TESTS', task_manager.passed, print_try_number)
if task_manager.failed:
logger.print_tests('FAILED TESTS', task_manager.failed, print_try_number)
logger.move_to('failed', task_manager.failed)
if task_manager.started:
logger.print_tests('INTERRUPTED TESTS', task_manager.started.values(),
print_try_number)
logger.move_to('interrupted', task_manager.started.values())
if options.repeat > 1 and (task_manager.failed or task_manager.started):
logger.summarize(task_manager.passed, task_manager.failed,
task_manager.started.values())
logger.flush()
times.write_to_file(save_file)
if test_results:
test_results.dump_to_file_and_close()
if sigint_handler.got_sigint():
return -signal.SIGINT
return task_manager.global_exit_code
if __name__ == "__main__":
sys.exit(main())

third-party/gtest-parallel/gtest_parallel_mocks.py (new file, generated/vendored, 173 lines added)

@@ -0,0 +1,173 @@
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import threading
import time
class LoggerMock(object):
def __init__(self, test_lib):
self.test_lib = test_lib
self.runtimes = collections.defaultdict(list)
self.exit_codes = collections.defaultdict(list)
self.last_execution_times = collections.defaultdict(list)
self.execution_numbers = collections.defaultdict(list)
def log_exit(self, task):
self.runtimes[task.test_id].append(task.runtime_ms)
self.exit_codes[task.test_id].append(task.exit_code)
self.last_execution_times[task.test_id].append(task.last_execution_time)
self.execution_numbers[task.test_id].append(task.execution_number)
def assertRecorded(self, test_id, expected, retries):
self.test_lib.assertIn(test_id, self.runtimes)
self.test_lib.assertListEqual(expected['runtime_ms'][:retries],
self.runtimes[test_id])
self.test_lib.assertListEqual(expected['exit_code'][:retries],
self.exit_codes[test_id])
self.test_lib.assertListEqual(expected['last_execution_time'][:retries],
self.last_execution_times[test_id])
self.test_lib.assertListEqual(expected['execution_number'][:retries],
self.execution_numbers[test_id])
class TestTimesMock(object):
def __init__(self, test_lib, test_data=None):
self.test_lib = test_lib
self.test_data = test_data or {}
self.last_execution_times = collections.defaultdict(list)
def record_test_time(self, test_binary, test_name, last_execution_time):
test_id = (test_binary, test_name)
self.last_execution_times[test_id].append(last_execution_time)
def get_test_time(self, test_binary, test_name):
test_group, test = test_name.split('.')
return self.test_data.get(test_binary, {}).get(test_group,
{}).get(test, None)
def assertRecorded(self, test_id, expected, retries):
self.test_lib.assertIn(test_id, self.last_execution_times)
self.test_lib.assertListEqual(expected['last_execution_time'][:retries],
self.last_execution_times[test_id])
class TestResultsMock(object):
def __init__(self, test_lib):
self.results = []
self.test_lib = test_lib
def log(self, test_name, runtime_ms, actual_result):
self.results.append((test_name, runtime_ms, actual_result))
def assertRecorded(self, test_id, expected, retries):
test_results = [
(test_id[1], runtime_ms / 1000.0, exit_code)
for runtime_ms, exit_code in zip(expected['runtime_ms'][:retries],
expected['exit_code'][:retries])
]
for test_result in test_results:
self.test_lib.assertIn(test_result, self.results)
class TaskManagerMock(object):
def __init__(self):
self.running_groups = []
self.check_lock = threading.Lock()
self.had_running_parallel_groups = False
self.total_tasks_run = 0
self.started = {}
def __register_start(self, task):
self.started[task.task_id] = task
def register_exit(self, task):
self.started.pop(task.task_id)
def run_task(self, task):
self.__register_start(task)
test_group = task.test_name.split('.')[0]
with self.check_lock:
self.total_tasks_run += 1
if test_group in self.running_groups:
self.had_running_parallel_groups = True
self.running_groups.append(test_group)
# Delay as if real test were run.
time.sleep(0.001)
with self.check_lock:
self.running_groups.remove(test_group)
class TaskMockFactory(object):
def __init__(self, test_data):
self.data = test_data
self.passed = []
self.failed = []
def get_task(self, test_id, execution_number=0):
task = TaskMock(test_id, execution_number, self.data[test_id])
if task.exit_code == 0:
self.passed.append(task)
else:
self.failed.append(task)
return task
def __call__(self, test_binary, test_name, test_command, execution_number,
last_execution_time, output_dir):
return self.get_task((test_binary, test_name), execution_number)
class TaskMock(object):
def __init__(self, test_id, execution_number, test_data):
self.test_id = test_id
self.execution_number = execution_number
self.runtime_ms = test_data['runtime_ms'][execution_number]
self.exit_code = test_data['exit_code'][execution_number]
self.last_execution_time = (
test_data['last_execution_time'][execution_number])
if 'log_file' in test_data:
self.log_file = test_data['log_file'][execution_number]
else:
self.log_file = None
self.test_command = None
self.output_dir = None
self.test_binary = test_id[0]
self.test_name = test_id[1]
self.task_id = (test_id[0], test_id[1], execution_number)
def run(self):
pass
class SubprocessMock(object):
def __init__(self, test_data=None):
self._test_data = test_data
self.last_invocation = None
def __call__(self, command, **kwargs):
self.last_invocation = command
binary = command[0]
test_list = []
tests_for_binary = sorted(self._test_data.get(binary, {}).items())
for test_group, tests in tests_for_binary:
test_list.append(test_group + ".")
for test in sorted(tests):
test_list.append(" " + test)
return '\n'.join(test_list)

third-party/gtest-parallel/gtest_parallel_tests.py (new file, generated/vendored, 651 lines added)

@@ -0,0 +1,651 @@
#!/usr/bin/env python
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os.path
import random
import shutil
import sys
import tempfile
import threading
import unittest
import gtest_parallel
from gtest_parallel_mocks import LoggerMock
from gtest_parallel_mocks import SubprocessMock
from gtest_parallel_mocks import TestTimesMock
from gtest_parallel_mocks import TestResultsMock
from gtest_parallel_mocks import TaskManagerMock
from gtest_parallel_mocks import TaskMockFactory
from gtest_parallel_mocks import TaskMock
@contextlib.contextmanager
def guard_temp_dir():
try:
temp_dir = tempfile.mkdtemp()
yield temp_dir
finally:
shutil.rmtree(temp_dir)
@contextlib.contextmanager
def guard_temp_subdir(temp_dir, *path):
assert path, 'Path should not be empty'
try:
temp_subdir = os.path.join(temp_dir, *path)
os.makedirs(temp_subdir)
yield temp_subdir
finally:
shutil.rmtree(os.path.join(temp_dir, path[0]))
@contextlib.contextmanager
def guard_patch_module(import_name, new_val):
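# Temporarily replace a dotted attribute reachable from the gtest_parallel
# module (e.g. 'os.path.expanduser') with new_val, yielding the previous
# value and restoring it when the context exits.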
def patch(module, names, val):
if len(names) == 1:
old = getattr(module, names[0])
setattr(module, names[0], val)
return old
else:
return patch(getattr(module, names[0]), names[1:], val)
try:
old_val = patch(gtest_parallel, import_name.split('.'), new_val)
yield old_val
finally:
patch(gtest_parallel, import_name.split('.'), old_val)
class TestTaskManager(unittest.TestCase):
def setUp(self):
self.passing_task = (('fake_binary', 'Fake.PassingTest'), {
'runtime_ms': [10],
'exit_code': [0],
'last_execution_time': [10],
})
self.failing_task = (('fake_binary', 'Fake.FailingTest'), {
'runtime_ms': [20, 30, 40],
'exit_code': [1, 1, 1],
'last_execution_time': [None, None, None],
})
self.fails_once_then_succeeds = (('another_binary', 'Fake.Test.FailOnce'), {
'runtime_ms': [21, 22],
'exit_code': [1, 0],
'last_execution_time': [None, 22],
})
self.fails_twice_then_succeeds = (('yet_another_binary',
'Fake.Test.FailTwice'), {
'runtime_ms': [23, 25, 24],
'exit_code': [1, 1, 0],
'last_execution_time':
[None, None, 24],
})
def execute_tasks(self, tasks, retries, expected_exit_code):
repeat = 1
times = TestTimesMock(self)
logger = LoggerMock(self)
test_results = TestResultsMock(self)
task_mock_factory = TaskMockFactory(dict(tasks))
task_manager = gtest_parallel.TaskManager(times, logger, test_results,
task_mock_factory, retries,
repeat)
for test_id, expected in tasks:
task = task_mock_factory.get_task(test_id)
task_manager.run_task(task)
expected['execution_number'] = list(range(len(expected['exit_code'])))
logger.assertRecorded(test_id, expected, retries + 1)
times.assertRecorded(test_id, expected, retries + 1)
test_results.assertRecorded(test_id, expected, retries + 1)
self.assertEqual(len(task_manager.started), 0)
self.assertListEqual(
sorted(task.task_id for task in task_manager.passed),
sorted(task.task_id for task in task_mock_factory.passed))
self.assertListEqual(
sorted(task.task_id for task in task_manager.failed),
sorted(task.task_id for task in task_mock_factory.failed))
self.assertEqual(task_manager.global_exit_code, expected_exit_code)
def test_passing_task_succeeds(self):
self.execute_tasks(tasks=[self.passing_task],
retries=0,
expected_exit_code=0)
def test_failing_task_fails(self):
self.execute_tasks(tasks=[self.failing_task],
retries=0,
expected_exit_code=1)
def test_failing_task_fails_even_with_retries(self):
self.execute_tasks(tasks=[self.failing_task],
retries=2,
expected_exit_code=1)
def test_executing_passing_and_failing_fails(self):
# Executing both a failing test and a passing one should make gtest-parallel
# fail, no matter if the failing task is run first or last.
self.execute_tasks(tasks=[self.failing_task, self.passing_task],
retries=2,
expected_exit_code=1)
self.execute_tasks(tasks=[self.passing_task, self.failing_task],
retries=2,
expected_exit_code=1)
def test_task_succeeds_with_one_retry(self):
# Executes test and retries once. The first run should fail and the second
# succeed, so gtest-parallel should succeed.
self.execute_tasks(tasks=[self.fails_once_then_succeeds],
retries=1,
expected_exit_code=0)
def test_task_fails_with_one_retry(self):
# Executes test and retries once, not enough for the test to start passing,
# so gtest-parallel should return an error.
self.execute_tasks(tasks=[self.fails_twice_then_succeeds],
retries=1,
expected_exit_code=1)
def test_runner_succeeds_when_all_tasks_eventually_succeeds(self):
# Executes the test and retries twice. One test should pass in the first
# attempt, another should take two runs, and the last one should take three
# runs. All tests should succeed, so gtest-parallel should succeed too.
self.execute_tasks(tasks=[
self.passing_task, self.fails_once_then_succeeds,
self.fails_twice_then_succeeds
],
retries=2,
expected_exit_code=0)
class TestSaveFilePath(unittest.TestCase):
class StreamMock(object):
def write(*args):
# Suppress any output.
pass
def test_get_save_file_path_unix(self):
with guard_temp_dir() as temp_dir, \
guard_patch_module('os.path.expanduser', lambda p: temp_dir), \
guard_patch_module('sys.stderr', TestSaveFilePath.StreamMock()), \
guard_patch_module('sys.platform', 'darwin'):
with guard_patch_module('os.environ', {}), \
guard_temp_subdir(temp_dir, '.cache'):
self.assertEqual(os.path.join(temp_dir, '.cache', 'gtest-parallel'),
gtest_parallel.get_save_file_path())
with guard_patch_module('os.environ', {'XDG_CACHE_HOME': temp_dir}):
self.assertEqual(os.path.join(temp_dir, 'gtest-parallel'),
gtest_parallel.get_save_file_path())
with guard_patch_module('os.environ',
{'XDG_CACHE_HOME': os.path.realpath(__file__)}):
self.assertEqual(os.path.join(temp_dir, '.gtest-parallel-times'),
gtest_parallel.get_save_file_path())
def test_get_save_file_path_win32(self):
with guard_temp_dir() as temp_dir, \
guard_patch_module('os.path.expanduser', lambda p: temp_dir), \
guard_patch_module('sys.stderr', TestSaveFilePath.StreamMock()), \
guard_patch_module('sys.platform', 'win32'):
with guard_patch_module('os.environ', {}), \
guard_temp_subdir(temp_dir, 'AppData', 'Local'):
self.assertEqual(
os.path.join(temp_dir, 'AppData', 'Local', 'gtest-parallel'),
gtest_parallel.get_save_file_path())
with guard_patch_module('os.environ', {'LOCALAPPDATA': temp_dir}):
self.assertEqual(os.path.join(temp_dir, 'gtest-parallel'),
gtest_parallel.get_save_file_path())
with guard_patch_module('os.environ',
{'LOCALAPPDATA': os.path.realpath(__file__)}):
self.assertEqual(os.path.join(temp_dir, '.gtest-parallel-times'),
gtest_parallel.get_save_file_path())
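
# Exercises gtest_parallel.execute_tasks() with a TaskManagerMock and checks
# that tests of the same test case never run in parallel when
# serialize_test_cases is set, while every generated task still gets run.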
class TestSerializeTestCases(unittest.TestCase):
def _execute_tasks(self, max_number_of_test_cases,
max_number_of_tests_per_test_case, max_number_of_repeats,
max_number_of_workers, serialize_test_cases):
tasks = []
for test_case in range(max_number_of_test_cases):
for test_name in range(max_number_of_tests_per_test_case):
# All arguments for gtest_parallel.Task except for test_name are fake.
test_name = 'TestCase{}.test{}'.format(test_case, test_name)
for execution_number in range(random.randint(1, max_number_of_repeats)):
tasks.append(
gtest_parallel.Task('path/to/binary', test_name,
['path/to/binary', '--gtest_filter=*'],
execution_number + 1, None, 'path/to/output'))
expected_tasks_number = len(tasks)
task_manager = TaskManagerMock()
gtest_parallel.execute_tasks(tasks, max_number_of_workers, task_manager,
None, serialize_test_cases)
self.assertEqual(serialize_test_cases,
not task_manager.had_running_parallel_groups)
self.assertEqual(expected_tasks_number, task_manager.total_tasks_run)
def test_running_parallel_test_cases_without_repeats(self):
self._execute_tasks(max_number_of_test_cases=4,
max_number_of_tests_per_test_case=32,
max_number_of_repeats=1,
max_number_of_workers=16,
serialize_test_cases=True)
def test_running_parallel_test_cases_with_repeats(self):
self._execute_tasks(max_number_of_test_cases=4,
max_number_of_tests_per_test_case=32,
max_number_of_repeats=4,
max_number_of_workers=16,
serialize_test_cases=True)
def test_running_parallel_tests(self):
self._execute_tasks(max_number_of_test_cases=4,
max_number_of_tests_per_test_case=128,
max_number_of_repeats=1,
max_number_of_workers=16,
serialize_test_cases=False)
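
# Spawns worker threads that repeatedly load, update and save the same
# TestTimes pickle file, checking that each thread's own entries survive the
# concurrent read/write cycles.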
class TestTestTimes(unittest.TestCase):
def test_race_in_test_times_load_save(self):
max_number_of_workers = 8
max_number_of_read_write_cycles = 64
test_times_file_name = 'test_times.pickle'
def start_worker(save_file):
def test_times_worker():
thread_id = threading.current_thread().ident
path_to_binary = 'path/to/binary' + hex(thread_id)
for cnt in range(max_number_of_read_write_cycles):
times = gtest_parallel.TestTimes(save_file)
threads_test_times = [
binary for (binary, _) in times._TestTimes__times.keys()
if binary.startswith(path_to_binary)
]
self.assertEqual(cnt, len(threads_test_times))
times.record_test_time('{}-{}'.format(path_to_binary, cnt),
'TestFoo.testBar', 1000)
times.write_to_file(save_file)
self.assertEqual(
1000,
times.get_test_time('{}-{}'.format(path_to_binary, cnt),
'TestFoo.testBar'))
self.assertIsNone(
times.get_test_time('{}-{}'.format(path_to_binary, cnt), 'baz'))
t = threading.Thread(target=test_times_worker)
t.start()
return t
with guard_temp_dir() as temp_dir:
try:
workers = [
start_worker(os.path.join(temp_dir, test_times_file_name))
for _ in range(max_number_of_workers)
]
finally:
for worker in workers:
worker.join()
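
# Checks that a task hitting the global timeout is still counted as run, has
# no exit code, and reports a runtime equal to the timeout (in milliseconds).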
class TestTimeoutTestCases(unittest.TestCase):
def test_task_timeout(self):
timeout = 1
task = gtest_parallel.Task('test_binary', 'test_name', ['test_command'], 1,
None, 'output_dir')
tasks = [task]
task_manager = TaskManagerMock()
gtest_parallel.execute_tasks(tasks, 1, task_manager, timeout, True)
self.assertEqual(1, task_manager.total_tasks_run)
self.assertEqual(None, task.exit_code)
self.assertEqual(1000, task.runtime_ms)
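
# Covers gtest_parallel.Task: how _logname() builds log file paths and how
# run() records the exit code (or the lack of one when a run is interrupted).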
class TestTask(unittest.TestCase):
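  # _logname() should replace '.' and '/' in test names with '_', use only
  # the binary's basename, and place the log in the given output directory.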
def test_log_file_names(self):
def root():
return 'C:\\' if sys.platform == 'win32' else '/'
self.assertEqual(os.path.join('.', 'bin-Test_case-100.log'),
gtest_parallel.Task._logname('.', 'bin', 'Test.case', 100))
self.assertEqual(
os.path.join('..', 'a', 'b', 'bin-Test_case_2-1.log'),
gtest_parallel.Task._logname(os.path.join('..', 'a', 'b'),
os.path.join('..', 'bin'), 'Test.case/2',
1))
self.assertEqual(
os.path.join('..', 'a', 'b', 'bin-Test_case_2-5.log'),
gtest_parallel.Task._logname(os.path.join('..', 'a', 'b'),
os.path.join(root(), 'c', 'd', 'bin'),
'Test.case/2', 5))
self.assertEqual(
os.path.join(root(), 'a', 'b', 'bin-Instantiation_Test_case_2-3.log'),
gtest_parallel.Task._logname(os.path.join(root(), 'a', 'b'),
os.path.join('..', 'c', 'bin'),
'Instantiation/Test.case/2', 3))
self.assertEqual(
os.path.join(root(), 'a', 'b', 'bin-Test_case-1.log'),
gtest_parallel.Task._logname(os.path.join(root(), 'a', 'b'),
os.path.join(root(), 'c', 'd', 'bin'),
'Test.case', 1))
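  # Without an output directory, _logname() should fall back to a file in
  # the system temporary directory.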
def test_logs_to_temporary_files_without_output_dir(self):
log_file = gtest_parallel.Task._logname(None, None, None, None)
self.assertEqual(tempfile.gettempdir(), os.path.dirname(log_file))
os.remove(log_file)
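  # Runs run_test_body with subprocess.Popen, the SIGINT handler and
  # thread.exit patched out, optionally simulating an interrupted run.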
def _execute_run_test(self, run_test_body, interrupt_test):
def popen_mock(*_args, **_kwargs):
return None
class SigHandlerMock(object):
class ProcessWasInterrupted(Exception):
pass
def wait(*_args):
if interrupt_test:
raise SigHandlerMock.ProcessWasInterrupted()
return 42
with guard_temp_dir() as temp_dir, \
guard_patch_module('subprocess.Popen', popen_mock), \
guard_patch_module('sigint_handler', SigHandlerMock()), \
guard_patch_module('thread.exit', lambda: None):
run_test_body(temp_dir)
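  # A task that completes normally should create its log file and record the
  # exit code returned by the mocked signal handler's wait().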
def test_run_normal_task(self):
def run_test(temp_dir):
task = gtest_parallel.Task('fake/binary', 'test', ['fake/binary'], 1,
None, temp_dir)
self.assertFalse(os.path.isfile(task.log_file))
task.run()
self.assertTrue(os.path.isfile(task.log_file))
self.assertEqual(42, task.exit_code)
self._execute_run_test(run_test, False)
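  # An interrupted task should keep its temporary log file and end up with
  # no exit code.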
def test_run_interrupted_task_with_transient_log(self):
def run_test(_):
task = gtest_parallel.Task('fake/binary', 'test', ['fake/binary'], 1,
None, None)
self.assertTrue(os.path.isfile(task.log_file))
task.run()
self.assertTrue(os.path.isfile(task.log_file))
self.assertIsNone(task.exit_code)
self._execute_run_test(run_test, True)
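
# Exercises the FilterFormat logger: without an output directory, log files
# of finished tests are deleted on log_exit(); with one, logs of failed tests
# are moved into a 'failed' subdirectory of the output directory.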
class TestFilterFormat(unittest.TestCase):
def _execute_test(self, test_body, drop_output):
class StdoutMock(object):
def isatty(*_args):
return False
def write(*args):
pass
def flush(*args):
pass
with guard_temp_dir() as temp_dir, \
guard_patch_module('sys.stdout', StdoutMock()):
logger = gtest_parallel.FilterFormat(None if drop_output else temp_dir)
logger.log_tasks(42)
test_body(logger)
logger.flush()
def test_no_output_dir(self):
def run_test(logger):
passed = [
TaskMock(
('fake/binary', 'FakeTest'), 0, {
'runtime_ms': [10],
'exit_code': [0],
'last_execution_time': [10],
'log_file': [os.path.join(tempfile.gettempdir(), 'fake.log')]
})
]
open(passed[0].log_file, 'w').close()
self.assertTrue(os.path.isfile(passed[0].log_file))
logger.log_exit(passed[0])
self.assertFalse(os.path.isfile(passed[0].log_file))
logger.print_tests('', passed, True)
logger.move_to(None, passed)
logger.summarize(passed, [], [])
self._execute_test(run_test, True)
def test_with_output_dir(self):
def run_test(logger):
failed = [
TaskMock(
('fake/binary', 'FakeTest'), 0, {
'runtime_ms': [10],
'exit_code': [1],
'last_execution_time': [10],
'log_file': [os.path.join(logger.output_dir, 'fake.log')]
})
]
open(failed[0].log_file, 'w').close()
self.assertTrue(os.path.isfile(failed[0].log_file))
logger.log_exit(failed[0])
self.assertTrue(os.path.isfile(failed[0].log_file))
logger.print_tests('', failed, True)
logger.move_to('failed', failed)
self.assertFalse(os.path.isfile(failed[0].log_file))
self.assertTrue(
os.path.isfile(os.path.join(logger.output_dir, 'failed', 'fake.log')))
logger.summarize([], failed, [])
self._execute_test(run_test, False)
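
# Tests gtest_parallel.find_tests() against a SubprocessMock that fakes the
# --gtest_list_tests output: task ordering by recorded runtime, handling of
# disabled and previously failed tests, and the --gtest_filter, --gtest_color
# and --repeat options.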
class TestFindTests(unittest.TestCase):
ONE_DISABLED_ONE_ENABLED_TEST = {
"fake_unittests": {
"FakeTest": {
"Test1": None,
"DISABLED_Test2": None,
}
}
}
ONE_FAILED_ONE_PASSED_TEST = {
"fake_unittests": {
"FakeTest": {
# Failed (and new) tests have no recorded runtime.
"FailedTest": None,
"Test": 1,
}
}
}
ONE_TEST = {
"fake_unittests": {
"FakeTest": {
"TestSomething": None,
}
}
}
MULTIPLE_BINARIES_MULTIPLE_TESTS_ONE_FAILURE = {
"fake_unittests": {
"FakeTest": {
"TestSomething": None,
"TestSomethingElse": 2,
},
"SomeOtherTest": {
"YetAnotherTest": 3,
},
},
"fake_tests": {
"Foo": {
"Bar": 4,
"Baz": 4,
}
}
}
def _process_options(self, options):
parser = gtest_parallel.default_options_parser()
options, binaries = parser.parse_args(options)
self.assertEqual(len(binaries), 0)
return options
def _call_find_tests(self, test_data, options=None):
subprocess_mock = SubprocessMock(test_data)
options = self._process_options(options or [])
with guard_patch_module('subprocess.check_output', subprocess_mock):
tasks = gtest_parallel.find_tests(test_data.keys(), [], options,
TestTimesMock(self, test_data))
    # Clean up the transient log files the tasks created, since output_dir
    # defaults to None.
for task in tasks:
if os.path.isfile(task.log_file):
os.remove(task.log_file)
return tasks, subprocess_mock
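  # Failed or new tests (with no recorded time) should be scheduled first,
  # followed by the remaining tests in order of decreasing recorded runtime.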
def test_tasks_are_sorted(self):
tasks, _ = self._call_find_tests(
self.MULTIPLE_BINARIES_MULTIPLE_TESTS_ONE_FAILURE)
self.assertEqual([task.last_execution_time for task in tasks],
[None, 4, 4, 3, 2])
def test_does_not_run_disabled_tests_by_default(self):
tasks, subprocess_mock = self._call_find_tests(
self.ONE_DISABLED_ONE_ENABLED_TEST)
self.assertEqual(len(tasks), 1)
self.assertFalse("DISABLED_" in tasks[0].test_name)
self.assertNotIn("--gtest_also_run_disabled_tests",
subprocess_mock.last_invocation)
def test_runs_disabled_tests_when_asked(self):
tasks, subprocess_mock = self._call_find_tests(
self.ONE_DISABLED_ONE_ENABLED_TEST, ['--gtest_also_run_disabled_tests'])
self.assertEqual(len(tasks), 2)
self.assertEqual(sorted([task.test_name for task in tasks]),
["FakeTest.DISABLED_Test2", "FakeTest.Test1"])
self.assertIn("--gtest_also_run_disabled_tests",
subprocess_mock.last_invocation)
def test_runs_failed_tests_by_default(self):
tasks, _ = self._call_find_tests(self.ONE_FAILED_ONE_PASSED_TEST)
self.assertEqual(len(tasks), 2)
self.assertEqual(sorted([task.test_name for task in tasks]),
["FakeTest.FailedTest", "FakeTest.Test"])
self.assertEqual({task.last_execution_time for task in tasks}, {None, 1})
def test_runs_only_failed_tests_when_asked(self):
tasks, _ = self._call_find_tests(self.ONE_FAILED_ONE_PASSED_TEST,
['--failed'])
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0].test_binary, "fake_unittests")
self.assertEqual(tasks[0].test_name, "FakeTest.FailedTest")
self.assertIsNone(tasks[0].last_execution_time)
def test_does_not_apply_gtest_filter_by_default(self):
_, subprocess_mock = self._call_find_tests(self.ONE_TEST)
self.assertFalse(
any(
arg.startswith('--gtest_filter=SomeFilter')
for arg in subprocess_mock.last_invocation))
def test_applies_gtest_filter(self):
_, subprocess_mock = self._call_find_tests(self.ONE_TEST,
['--gtest_filter=SomeFilter'])
self.assertIn('--gtest_filter=SomeFilter', subprocess_mock.last_invocation)
def test_applies_gtest_color_by_default(self):
tasks, _ = self._call_find_tests(self.ONE_TEST)
self.assertEqual(len(tasks), 1)
self.assertIn('--gtest_color=yes', tasks[0].test_command)
def test_applies_gtest_color(self):
tasks, _ = self._call_find_tests(self.ONE_TEST, ['--gtest_color=Lemur'])
self.assertEqual(len(tasks), 1)
self.assertIn('--gtest_color=Lemur', tasks[0].test_command)
def test_repeats_tasks_once_by_default(self):
tasks, _ = self._call_find_tests(self.ONE_TEST)
self.assertEqual(len(tasks), 1)
def test_repeats_tasks_multiple_times(self):
tasks, _ = self._call_find_tests(self.ONE_TEST, ['--repeat=3'])
self.assertEqual(len(tasks), 3)
    # Test that all tasks have the same test_name, test_binary and
    # test_command.
all_tasks_set = set(
(task.test_name, task.test_binary, tuple(task.test_command))
for task in tasks)
self.assertEqual(len(all_tasks_set), 1)
    # Test that tasks have consecutive execution_numbers, starting from 1.
self.assertEqual(sorted(task.execution_number for task in tasks), [1, 2, 3])
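  # When listing tests from a binary fails, find_tests() should bail out via
  # sys.exit(); the patched exit raises so the failure is observable here.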
def test_gtest_list_tests_fails(self):
def exit_mock(*args):
raise AssertionError('Foo')
options = self._process_options([])
with guard_patch_module('sys.exit', exit_mock):
self.assertRaises(AssertionError, gtest_parallel.find_tests,
[sys.executable], [], options, None)

if __name__ == '__main__':
unittest.main()