#!/usr/bin/env python3
# encoding: utf-8
"""
This script runs a sequence of WML unit test scenarios.
"""

import argparse, enum, os, re, subprocess, sys


class UnexpectedTestStatusException(Exception):
    """Exception raised when a unit test doesn't return the expected result."""
    pass

class UnitTestResult(enum.Enum):
    """Enum corresponding to game_launcher.hpp's unit_test_result"""
    PASS = 0
    FAIL = 1
    TIMEOUT = 2
    FAIL_LOADING_REPLAY = 3
    FAIL_PLAYING_REPLAY = 4
    FAIL_BROKE_STRICT = 5
    FAIL_WML_EXCEPTION = 6
    FAIL_BY_DEFEAT = 7
    PASS_BY_VICTORY = 8

    def __str__(self):
        return str(self.value) + ' ' + self.name

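# For example, str(UnitTestResult.FAIL_WML_EXCEPTION) is "6 FAIL_WML_EXCEPTION";
# this is the form in which expected and actual results appear in the failure
# messages printed by WesnothRunner.run_tests below.
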
class TestCase:
    """Represents a single line of the wml_test_schedule."""
    def __init__(self, status, name):
        self.status = status
        self.name = name

    def __str__(self):
        return "TestCase<{status}, {name}>".format(status=self.status, name=self.name)

class TestResultAccumulator:
    """Tracks how many tests have passed, been skipped, failed or crashed."""
    def __init__(self, total):
        self.total = total
        self.passed = 0
        self.skipped = 0
        self.failed = 0
        self.crashed = 0

    def pass_test(self, n=1):
        self.passed += n

    def skip_test(self, n=1):
        self.skipped += n

    def fail_test(self, n=1):
        self.failed += n

    def crash_test(self, n=1):
        self.crashed += n

    def __bool__(self):
        return self.passed + self.skipped == self.total

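# A small worked example of the accumulator's semantics: with total=3,
# pass_test(2) followed by skip_test(1) accounts for all three tests, so
# bool(acc) is True; any fail_test() or crash_test() leaves
# passed + skipped < total, making bool(acc) False, which drives the
# final sys.exit(1) at the bottom of this script.
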
class TestListParser:
    """Each line in the list of tests should be formatted:
    <expected return code><space><name of unit test scenario>

    For example:
    0 test_functionality

    Lines beginning with # are treated as comments.
    """
    def __init__(self, options):
        self.verbose = options.verbose
        self.filename = options.list

    def get(self, batcher):
        status_name_re = re.compile(r"^(\d+) ([\w-]+)$")
        test_list = []
        with open(self.filename, mode="rt") as f:
            for line in f:
                line = line.strip()
                if line == "" or line.startswith("#"):
                    continue

                x = status_name_re.match(line)
                if x is None:
                    print("Could not parse test list file:", line)
                    continue

                t = TestCase(UnitTestResult(int(x.groups()[0])), x.groups()[1])
                if self.verbose > 1:
                    print(t)
                test_list.append(t)
        return batcher(test_list), TestResultAccumulator(len(test_list))

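# A hypothetical wml_test_schedule excerpt in the format TestListParser
# expects (the status number is the UnitTestResult value the scenario should
# finish with; these scenario names are illustrative, not real tests):
#
#   # interface tests
#   0 test_objectives_dialog
#   2 test_animation_runs_forever
#   6 test_deliberate_wml_error
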
def run_with_rerun_for_sdl_video(args, timeout):
    """A wrapper for subprocess.run with a workaround for the issue of
    travis+18.04 intermittently failing to initialise SDL.
    """
    # Sanity check on the number of retries. It's a rare failure, a single
    # retry would probably be enough.
    sdl_retries = 0
    while sdl_retries < 10:
        res = subprocess.run(args, timeout=timeout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        if b"Could not initialize SDL_video" not in res.stdout:
            return res
        sdl_retries += 1
        print("Could not initialise SDL_video error, attempt", sdl_retries)
    raise RuntimeError("SDL_video failed to initialise after repeated retries")

class WesnothRunner:
    def __init__(self, options):
        self.verbose = options.verbose
        if options.path in ["XCode", "xcode", "Xcode"]:
            import glob
            build = "Debug" if options.debug_bin else "Release"
            pattern = os.path.join("~/Library/Developer/XCode/DerivedData/The_Battle_for_Wesnoth*/Build/Products",
                                   build,
                                   "The Battle for Wesnoth.app/Contents/MacOS/The Battle for Wesnoth")
            path_list = glob.glob(os.path.expanduser(pattern))
            if len(path_list) == 0:
                raise FileNotFoundError("Couldn't find your Xcode build dir")
            if len(path_list) > 1:
                # seems better than choosing one at random
                raise RuntimeError("Found more than one Xcode build dir")
            path = path_list[0]
        else:
            if options.path is None:
                path = os.path.split(os.path.realpath(sys.argv[0]))[0]
            else:
                path = options.path
            if os.path.isdir(path):
                path += "/wesnoth"
                if options.debug_bin:
                    path += "-debug"
        self.common_args = [path, "--nobanner"]
        if os.name == 'nt':
            self.common_args.append("--wnoconsole")
            self.common_args.append("--wnoredirect")
        if options.strict_mode:
            self.common_args.append("--log-strict=warning")
        if options.clean:
            self.common_args.append("--noaddons")
        if options.additional_arg is not None:
            self.common_args.extend(options.additional_arg)
        self.timeout = options.timeout
        self.batch_timeout = options.batch_timeout
        if self.verbose > 1:
            print("Options that will be used for all Wesnoth instances:", repr(self.common_args))

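    # Under default options on a non-Windows build, common_args comes out as
    # something like ["/path/to/wesnoth", "--nobanner", "--log-strict=warning"]
    # (plus "--noaddons" when --clean is given); run_tests below then appends
    # a "-u <scenario name>" pair for each test in the batch.
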
    def run_tests(self, test_list, test_summary):
        """Run all of the tests in a single instance of Wesnoth"""
        if len(test_list) == 0:
            raise ValueError("Running an empty test list")
        if len(test_list) > 1:
            for test in test_list:
                if test.status != UnitTestResult.PASS:
                    raise NotImplementedError("run_tests doesn't yet support batching tests with non-zero statuses")
        expected_result = test_list[0].status
        args = self.common_args.copy()
        for test in test_list:
            args.append("-u")
            args.append(test.name)
            if self.timeout == 0:
                if test.status == UnitTestResult.TIMEOUT:
                    test_summary.skip_test()
                    print('Skipping test', test_list[0].name, 'because timeout is disabled')
                    return
                timeout = None
            else:
                if test.status == UnitTestResult.FAIL_BROKE_STRICT and not options.strict_mode:
                    test_summary.skip_test()
                    print('Skipping test', test_list[0].name, 'because strict mode is disabled')
                    return
                if len(test_list) == 1:
                    timeout = self.timeout
                else:
                    timeout = self.batch_timeout
        if self.verbose > 0:
            if len(test_list) == 1:
                print("Running test", test_list[0].name)
            else:
                print("Running {count} tests ({names})".format(count=len(test_list),
                      names=", ".join([test.name for test in test_list])))
        if self.verbose > 1:
            print(repr(args))
        try:
            res = run_with_rerun_for_sdl_video(args, timeout)
        except subprocess.TimeoutExpired as t:
            if self.verbose > 0:
                print("Timed out (killed by Python timeout implementation)")
            res = subprocess.CompletedProcess(args, UnitTestResult.TIMEOUT.value, t.output or b'')
        if self.verbose > 0:
            print(res.stdout.decode('utf-8'))
        if self.verbose > 1:
            print("Result:", res.returncode)
        # Count how many tests in the batch got far enough to pass; if the
        # whole batch succeeded this is all of them.
        num_passed = 0
        if test_list[0].status == UnitTestResult.PASS:
            num_passed = res.stdout.count(b"PASS TEST")
            test_summary.pass_test(num_passed)
        if res.returncode < 0:
            print("Wesnoth exited because of signal", -res.returncode)
            if options.backtrace:
                print("Launching GDB for a backtrace...")
                gdb_args = ["gdb", "-q", "-batch", "-ex", "start", "-ex", "continue", "-ex", "bt", "-ex", "quit", "--args"]
                gdb_args.extend(args)
                subprocess.run(gdb_args, timeout=240)
            test_summary.crash_test()
            test_summary.skip_test(len(test_list) - num_passed - 1)
            raise UnexpectedTestStatusException()
        returned_result = UnitTestResult(res.returncode)
        if returned_result != expected_result:
            if self.verbose == 0:
                for line in res.stdout.decode('utf-8').splitlines():
                    if expected_result == UnitTestResult.PASS and line.startswith("PASS TEST"):
                        continue
                    print(line)
            print("Failure, Wesnoth returned", returned_result, "for", test_list[num_passed].name, "but we expected", expected_result)
            test_summary.fail_test()
            test_summary.skip_test(len(test_list) - num_passed - 1)
            raise UnexpectedTestStatusException()
        elif test_list[0].status != UnitTestResult.PASS:
            test_summary.pass_test()

def test_batcher(test_list):
    """A generator function that collects tests into batches which a single
    instance of Wesnoth can run.
    """
    expected_to_pass = []
    for test in test_list:
        if test.status == UnitTestResult.PASS:
            expected_to_pass.append(test)
        else:
            yield [test]
    if len(expected_to_pass) == 0:
        return
    if options.batch_max == 0:
        yield expected_to_pass
        return
    while len(expected_to_pass) > 0:
        yield expected_to_pass[0:options.batch_max]
        expected_to_pass = expected_to_pass[options.batch_max:]

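# For example (hypothetical names), with batch_max=0 a schedule of
#   0 test_a / 0 test_b / 6 test_c / 0 test_d
# is emitted as [test_c] first (tests with non-PASS statuses always run
# alone), then as one batch [test_a, test_b, test_d]; with batch_max=2 the
# PASS batch is instead split into [test_a, test_b] and [test_d].
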
def test_nobatcher(test_list):
    """A generator function that provides the same API as test_batcher but
    emits the tests one at a time."""
    for test in test_list:
        yield [test]

if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    # The options that are mandatory to support (because they're used in the
    # Travis script) are the one-letter forms of verbose, clean, timeout and
    # backtrace.
    ap.add_argument("-v", "--verbose", action="count", default=0,
                    help="Verbose mode. Use -v twice for very verbose mode.")
    ap.add_argument("-c", "--clean", action="store_true",
                    help="Clean mode. (Don't load any add-ons. Used for mainline tests.)")
    ap.add_argument("-a", "--additional_arg", action="append",
                    help="Additional arguments to pass to wesnoth. For options that start with a hyphen, '--additional_arg --data-dir' will give an error; use '--additional_arg=--data-dir' instead.")
    ap.add_argument("-t", "--timeout", type=int, default=10,
                    help="New timer value to use, instead of 10s as default. The value 0 means no timer, and also skips tests that expect a timeout.")
    ap.add_argument("-bt", "--batch-timeout", type=int, default=300,
                    help="New timer value to use for batched tests, instead of 300s as default.")
    ap.add_argument("-bm", "--batch-max", type=int, default=0,
                    help="Maximum number of tests to run in a batch. Default no limit.")
    ap.add_argument("-bd", "--batch-disable", action="store_const", const=1, dest='batch_max',
                    help="Disable test batching; may be useful if debugging a small subset of tests. Equivalent to --batch-max=1.")
    ap.add_argument("-s", "--no-strict", dest="strict_mode", action="store_false",
                    help="Disable strict mode. By default, we run wesnoth with the option --log-strict=warning to ensure errors result in a failed test.")
    ap.add_argument("-d", "--debug_bin", action="store_true",
                    help="Run the wesnoth-debug binary instead of wesnoth.")
    ap.add_argument("-g", "--backtrace", action="store_true",
                    help="If we encounter a crash, generate a backtrace using gdb. Requires gdb to be installed.")
    ap.add_argument("-p", "--path", metavar="dir",
                    help="Path to the wesnoth binary. By default assume it is with this script."
                    + " If running on a Mac, passing 'xcode' as the path will attempt to locate the binary in Xcode derived data directories.")
    ap.add_argument("-l", "--list", metavar="filename",
                    help="Loads the list of tests from the given file.",
                    default="wml_test_schedule")

    # Workaround for argparse not accepting option values that start with a hyphen,
    # for example "-a --user-data-dir". https://bugs.python.org/issue9334
    # New callers can use "-a=--user-data-dir", but compatibility with the old
    # version of run_wml_tests needs support for "-a --user-data-dir".
    try:
        while True:
            i = sys.argv.index("-a")
            sys.argv[i] = "=".join(["-a", sys.argv.pop(i + 1)])
    except (IndexError, ValueError):
        pass

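    # Example invocations (illustrative; paths depend on your setup):
    #   ./run_wml_tests -v -c                  # mainline tests, no add-ons
    #   ./run_wml_tests -t 0                   # no timer, skip timeout tests
    #   ./run_wml_tests -p path/to/build -g    # custom binary, gdb backtraces
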
    options = ap.parse_args()

    if options.verbose > 1:
        print(repr(options))

    batcher = test_nobatcher if options.batch_max == 1 else test_batcher
    test_list, test_summary = TestListParser(options).get(batcher)
    runner = WesnothRunner(options)

    for batch in test_list:
        while len(batch) > 0:
            last_passed_count = test_summary.passed
            try:
                runner.run_tests(batch, test_summary)
                batch = []
            except UnexpectedTestStatusException:
                # Retry the remainder of the batch, dropping the tests that
                # already passed and the one that just failed. run_tests has
                # already marked the remainder as skipped, so un-skip the
                # tests that are about to be rerun.
                just_passed = test_summary.passed - last_passed_count
                batch = batch[just_passed + 1:]
                test_summary.skip_test(-len(batch))

print("Result:" if options.verbose > 0 else "WML Unit Tests Result:", test_summary.passed, "of", test_summary.total, "tests passed")
|
|
|
|
if test_summary.passed != test_summary.total:
|
|
breakdown = ["{0} passed".format(test_summary.passed)]
|
|
if test_summary.failed != 0:
|
|
breakdown.append("{0} failed".format(test_summary.failed))
|
|
if test_summary.crashed != 0:
|
|
breakdown.append("{0} crashed".format(test_summary.crashed))
|
|
if test_summary.skipped != 0:
|
|
breakdown.append("{0} skipped".format(test_summary.skipped))
|
|
print(" ({0})".format(', '.join(breakdown)))
|
|
|
|
if not test_summary:
|
|
sys.exit(1)
|