qt 6.5.1 original

This commit is contained in:
kleuter
2023-10-29 23:33:08 +01:00
parent 71d22ab6b0
commit 85d238dfda
21202 changed files with 5499099 additions and 0 deletions

24
util/testrunner/README Normal file
View File

@ -0,0 +1,24 @@
qt-testrunner.py is a script that can wrap the execution of every test in Qt
in order to iron out common issues with testing. It can be turned on by
setting the environment variable TESTRUNNER=/path/to/qt-testrunner.py before
invoking ctest.
It is commonly in use in Coin, the Continuous Integration system of Qt.
It offers the following functionality:
+ Reads the XML test log and understands exactly which function of the test failed.
+ Checks the exit code and if needed repeats only the failed function of the test.
+ Appends output argument to it: "-o file.xml"
+ In case the test is executed multiple times
- the previous output files are saved
- the verbosity level is increased
The script itself has a testsuite that is simply run by invoking
qtbase/util/testrunner/tests/tst_testrunner.py
Please *run this manually* before submitting a change to qt-testrunner and
make sure it's passing. The reason it does not run automatically during the
usual qtbase test run is that
+ the test run should not depend on Python
+ we don't want to wrap the testrunner tests with testrunner.

View File

@ -0,0 +1,395 @@
#!/usr/bin/env python3
# Copyright (C) 2021 The Qt Company Ltd.
# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
# !!!IMPORTANT!!! If you change anything to this script, run the testsuite
# manually and make sure it still passes, as it doesn't run automatically.
# Just execute the command line as such:
#
# ./util/testrunner/tests/tst_testrunner.py -v [--debug]
#
# ======== qt-testrunner ========
#
# This script wraps the execution of a Qt test executable, for example
# tst_whatever, and tries to iron out unpredictable test failures.
# In particular:
#
# + Appends output argument to it: "-o tst_whatever.xml,xml"
# + Checks the exit code. If it is zero, the script exits with zero,
# otherwise proceeds.
# + Reads the XML test log and understands exactly which function
#   of the test failed.
# + If no XML file is found or was invalid, the test executable
# probably CRASHed, so we *re-run the full test once again*.
# + If some testcases failed it executes only those individually
# until they pass, or until max-repeats times is reached.
#
# The regular way to use is to set the environment variable TESTRUNNER to
# point to this script before invoking ctest.
#
# NOTE: this script is crafted specifically for use with Qt tests and for
# using it in Qt's CI. For example it detects and acts specially if test
# executable is "tst_selftests" or "androidtestrunner". It also detects
# env var "COIN_CTEST_RESULTSDIR" and uses it as log-dir.
#
# TODO implement --dry-run.
# Exit codes of this script:
# 0: PASS. Either no test failed, or failed initially but passed
# in the re-runs (FLAKY PASS).
# 1: Some unexpected error of this script.
# 2: FAIL! for at least one test, even after the individual re-runs.
# 3: CRASH! for the test executable even after re-running it once.
# Or when we can't re-run individual functions for any reason.
import sys
# Fail fast with a clear message on old interpreters: the rest of the script
# uses f-strings and other 3.6+ syntax, which would otherwise die with an
# unhelpful SyntaxError.
if sys.version_info < (3, 6):
    sys.stderr.write(
        "Error: this test wrapper script requires Python version 3.6 at least\n")
    sys.exit(1)

import argparse
import subprocess
import os
import traceback
import time
import timeit
import xml.etree.ElementTree as ET
import logging as L

from pprint import pprint
from typing import NamedTuple, Tuple, List, Optional
# Define a custom type for returning a fail incident
class WhatFailed(NamedTuple):
    """One test failure: the test function's name plus the optional data tag
    of the failing row (None when the function has no data-driven rows)."""
    func: str
    tag: Optional[str] = None
# In the last test re-run, we add special verbosity arguments, in an attempt
# to log more information about the failure
VERBOSE_ARGS = ["-v2", "-maxwarnings", "0"]
# Environment overlay for the final verbose re-run: turn on every Qt logging
# category and use a message pattern that includes process time, severity,
# category and source location for each message.
VERBOSE_ENV = {
    "QT_LOGGING_RULES": "*=true",
    "QT_MESSAGE_PATTERN": "[%{time process} %{if-debug}D%{endif}%{if-warning}W%{endif}%{if-critical}C%{endif}%{if-fatal}F%{endif}] %{category} %{file}:%{line} %{function}() - %{message}",
}
# The following special function names can not re-run individually.
NO_RERUN_FUNCTIONS = {
    "initTestCase", "init", "cleanup", "cleanupTestCase"
}
# The following tests do not write XML log files properly. qt-testrunner will
# not try to append "-o" to their command-line or re-run failed testcases.
# Only add tests here if absolutely necessary!
NON_XML_GENERATING_TESTS = {
    "tst_selftests",              # qtestlib's selftests are using an external test framework (Catch) that does not support -o argument
    "tst_QDoc",                   # Some of QDoc's tests are using an external test framework (Catch) that does not support -o argument
    "tst_QDoc_Catch_Generators",  # Some of QDoc's tests are using an external test framework (Catch) that does not support -o argument
}
def parse_args():
    """Parse the command line and post-process it into the settings the
    runner needs.

    Beyond plain argparse parsing, this derives several attributes on the
    returned namespace:
      - self_name:           basename of this script (used as log prefix)
      - log_dir:             defaults to $COIN_CTEST_RESULTSDIR or "."
      - test_basename:       basename of the test executable, with special
                             handling for QNX and Android wrapper scripts
      - specific_extra_args: extra args required by the detected wrapper
    Tests listed in NON_XML_GENERATING_TESTS get --no-extra-args forced on
    and re-runs disabled.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description="""
Wrap Qt test execution. This is intended to be invoked via the TESTRUNNER
environment variable before running ctest in the CI environment. The purpose
of the script is to repeat failed tests in order to iron out transient errors
caused by unpredictable factors. Individual test functions that failed are
retried up to max-repeats times until the test passes.
""",
                                     epilog="""
Default flags: --max-repeats 5 --passes-needed 1
"""
                                     )
    parser.add_argument("testargs", metavar="TESTARGS", nargs="+",
                        help="Test executable and arguments")
    parser.add_argument("--log-dir", metavar="DIR",
                        help="Where to write the XML log files with the test results of the primary test run;"
                             " by default write to CWD")
    parser.add_argument("--max-repeats", type=int, default=5, metavar='N',
                        help="In case the test FAILs, repeat the failed cases this many times")
    parser.add_argument("--passes-needed", type=int, default=1, metavar='M',
                        help="Number of repeats that need to succeed in order to return an overall PASS")
    parser.add_argument("--parse-xml-testlog", metavar="file.xml",
                        help="Do not run the full test the first time, but parse this XML test log;"
                             " if the test log contains failures, then re-run the failed cases normally,"
                             " as indicated by the other flags")
    parser.add_argument("--dry-run", action="store_true",
                        help="(TODO - not implemented yet) Do not run anything, just describe what would happen")
    parser.add_argument("--timeout", metavar="T",
                        help="Timeout for each test execution in seconds")
    parser.add_argument("--no-extra-args", action="store_true",
                        help="Do not append any extra arguments to the test command line, like"
                             " -o log_file.xml -v2 -vs. This will disable some functionality like the"
                             " failed test repetition and the verbose output on failure. This is"
                             " activated by default when TESTARGS is tst_selftests.")
    args = parser.parse_args()
    args.self_name = os.path.basename(sys.argv[0])
    args.specific_extra_args = []

    # Configure logging before any L.info()/L.error() call below.
    logging_format = args.self_name + " %(levelname)8s: %(message)s"
    L.basicConfig(format=logging_format, level=L.DEBUG)

    if args.log_dir is None:
        if "COIN_CTEST_RESULTSDIR" in os.environ:
            args.log_dir = os.environ["COIN_CTEST_RESULTSDIR"]
            L.info("Will write XML test logs to directory"
                   " COIN_CTEST_RESULTSDIR=%s", args.log_dir)
        else:
            args.log_dir = "."

    args.test_basename = os.path.basename(args.testargs[0])

    if args.test_basename.endswith(".exe"):
        args.test_basename = args.test_basename[:-4]

    # QNX test wrapper just needs to be skipped to figure out test_basename
    if args.test_basename == "coin_qnx_qemu_runner.sh":
        args.test_basename = os.path.basename(args.testargs[1])
        L.info("Detected coin_qnx_qemu_runner, test will be handled specially. Detected test basename: %s",
               args.test_basename)

    # On Android emulated platforms, "androidtestrunner" is invoked by CMake
    # to wrap the tests. We have to append the test arguments to it after
    # "--". Besides that we have to detect the basename to avoid saving the
    # XML log as "androidtestrunner.xml" for all tests.
    if args.test_basename == "androidtestrunner":
        args.specific_extra_args = [ "--" ]
        apk_arg = False
        for a in args.testargs[1:]:
            if a == "--apk":
                apk_arg = True
            elif apk_arg:
                apk_arg = False
                # The APK filename (minus extension) names the actual test.
                if a.endswith(".apk"):
                    args.test_basename = os.path.basename(a)[:-4]
                    break
        L.info("Detected androidtestrunner, test will be handled specially. Detected test basename: %s",
               args.test_basename)

    if args.test_basename in NON_XML_GENERATING_TESTS:
        L.info("Detected special test not able to generate XML log! Will not parse it and will not repeat individual testcases")
        args.no_extra_args = True
        args.max_repeats = 0

    return args
def parse_log(results_file) -> List[WhatFailed]:
    """Parse the XML test log file. Return the failed testcases, if any.
    Failures are considered the "fail" and "xpass" incidents.
    A testcase is a function with an optional data tag."""
    start_timer = timeit.default_timer()

    try:
        tree = ET.parse(results_file)
    except FileNotFoundError:
        L.error("XML log file not found: %s", results_file)
        raise
    except Exception as e:
        # Dump the raw file contents to help diagnose truncated/corrupt logs.
        L.error("Failed to parse the XML log file: %s", results_file)
        with open(results_file, "rb") as f:
            if os.stat(f.fileno()).st_size == 0:
                L.error(" File is empty")
            else:
                L.error(" File Contents:\n%s\n\n",
                        f.read().decode("utf-8", "ignore"))
        raise

    root = tree.getroot()
    if root.tag != "TestCase":
        raise AssertionError(
            f"The XML test log must have <TestCase> as root tag, but has: <{root.tag}>")

    failures = []
    n_passes = 0
    for function_elem in root:
        if function_elem.tag != "TestFunction":
            continue
        # Every <TestFunction> can have many <Incident> children.
        for incident in function_elem:
            if incident.tag != "Incident":
                continue
            if incident.attrib["type"] not in ("fail", "xpass"):
                n_passes += 1
                continue
            func = function_elem.attrib["name"]
            # Every <Incident> might have a <DataTag> child.
            data_tag = incident.find("DataTag")
            if data_tag is None:
                failures.append(WhatFailed(func))
            else:
                failures.append(WhatFailed(func, tag=data_tag.text))

    elapsed = timeit.default_timer() - start_timer
    L.info(f"Parsed XML file {results_file} in {elapsed:.3f} seconds")
    L.info(f"Found {n_passes} passes and {len(failures)} failures")
    return failures
def run_test(arg_list: List[str], **kwargs):
    """Execute one test command line via subprocess and return the
    CompletedProcess; kwargs are forwarded to subprocess.run()."""
    L.debug("Running test command line: %s", arg_list)
    completed = subprocess.run(arg_list, **kwargs)
    L.info("Test process exited with code: %d", completed.returncode)
    return completed
# Timestamp (ms) used by the previous unique_filename() call. Two calls in
# the same millisecond would otherwise produce identical "unique" names and
# one XML log would silently overwrite the other.
_last_unique_stamp = 0

def unique_filename(test_basename: str) -> str:
    """Return a per-call unique filename stem: "<test_basename>-<ms-timestamp>".

    The millisecond clock alone is not collision-free for back-to-back
    calls, so the stamp is forced to be strictly greater than the one
    handed out previously by this process.
    """
    global _last_unique_stamp
    stamp = round(time.time() * 1000)
    if stamp <= _last_unique_stamp:
        stamp = _last_unique_stamp + 1
    _last_unique_stamp = stamp
    return f"{test_basename}-{stamp}"
# Returns tuple: (exit_code, xml_logfile)
def run_full_test(test_basename, testargs: List[str], output_dir: str,
                  no_extra_args=False, dryrun=False,
                  timeout=None, specific_extra_args=None) \
        -> Tuple[int, Optional[str]]:
    """Run the whole test executable once.

    Unless no_extra_args is set, "-o" options are appended so the test
    writes XML, JUnit-XML and plain-text logs under output_dir while still
    echoing text output to stdout.

    Returns (exit_code, xml_logfile) where xml_logfile is None when no XML
    log was requested.
    """
    # The previous default for specific_extra_args was a mutable list ([]),
    # which is shared between calls; default to None and create a fresh
    # list per call instead.
    if specific_extra_args is None:
        specific_extra_args = []

    results_files = []
    output_testargs = []

    # Append arguments to write log to qtestlib XML file,
    # and text to stdout.
    if not no_extra_args:
        filename_base = unique_filename(test_basename)
        pathname_stem = os.path.join(output_dir, filename_base)
        xml_output_file = f"{pathname_stem}.xml"
        results_files.append(xml_output_file)
        output_testargs.extend([
            "-o", f"{xml_output_file},xml",
            "-o", f"{pathname_stem}.junit.xml,junitxml",
            "-o", f"{pathname_stem}.txt,txt",
            "-o", "-,txt"
        ])

    proc = run_test(testargs + specific_extra_args + output_testargs,
                    timeout=timeout)
    return (proc.returncode, results_files[0] if results_files else None)
def rerun_failed_testcase(test_basename, testargs: List[str], output_dir: str,
                          what_failed: WhatFailed,
                          max_repeats, passes_needed,
                          dryrun=False, timeout=None) -> bool:
    """Run a specific function:tag of a test, until it passes enough times, or
    until max_repeats is reached.
    Return True if it passes eventually, False if it fails.
    """
    assert passes_needed <= max_repeats
    # qtestlib accepts "function" or "function:tag" on the command line to
    # run a single testcase.
    failed_arg = what_failed.func
    if what_failed.tag:
        failed_arg += ":" + what_failed.tag
    n_passes = 0
    for i in range(max_repeats):
        # For the individual testcase re-runs, we log to file since Coin needs
        # to parse it. That is the reason we use unique filename every time.
        filename_base = unique_filename(test_basename)
        pathname_stem = os.path.join(output_dir, filename_base)
        output_args = [
            "-o", f"{pathname_stem}.xml,xml",
            "-o", f"{pathname_stem}.junit.xml,junitxml",
            "-o", f"{pathname_stem}.txt,txt",
            "-o", "-,txt"]
        L.info("Re-running testcase: %s", failed_arg)
        if i < max_repeats - 1:
            proc = run_test(testargs + output_args + [failed_arg],
                            timeout=timeout)
        else:  # last re-run: crank up verbosity to capture diagnostics
            proc = run_test(testargs + output_args + VERBOSE_ARGS + [failed_arg],
                            timeout=timeout,
                            env={**os.environ, **VERBOSE_ENV})
        if proc.returncode == 0:
            n_passes += 1
        # Stop as soon as we have collected enough passes; remaining
        # iterations would not change the verdict.
        if n_passes == passes_needed:
            L.info("Test has PASSed as FLAKY after re-runs:%d, passes:%d, failures:%d",
                   i+1, n_passes, i+1-n_passes)
            return True
    # Reaching here means we exhausted max_repeats without enough passes.
    assert n_passes <  passes_needed
    assert n_passes <= max_repeats
    n_failures = max_repeats - n_passes
    L.info("Test has FAILed despite all repetitions! re-runs:%d failures:%d",
           max_repeats, n_failures)
    return False
def main():
    """Top-level driver: run the full test up to twice, then re-run failed
    testcases individually.

    Exit codes: 0 PASS (possibly flaky), 2 FAIL after re-runs,
    3 CRASH / cannot re-run individually (1 is reserved for internal
    errors of this script, raised elsewhere).
    """
    args = parse_args()

    # With --parse-xml-testlog we do not execute the test at all, so there
    # is only one "run" (the parse); otherwise allow one full re-run.
    n_full_runs = 1 if args.parse_xml_testlog else 2

    for i in range(n_full_runs + 1):
        if 0 < i < n_full_runs:
            L.info("Will re-run the full test executable")
        elif i == n_full_runs:  # Failed on the final run
            L.error("Full test run failed repeatedly, aborting!")
            sys.exit(3)

        try:
            failed_functions = []
            if args.parse_xml_testlog:  # do not run test, just parse file
                failed_functions = parse_log(args.parse_xml_testlog)
                # Pretend the test returned correct exit code
                retcode = len(failed_functions)
            else:  # normal invocation, run test
                (retcode, results_file) = \
                    run_full_test(args.test_basename, args.testargs, args.log_dir,
                                  args.no_extra_args, args.dry_run, args.timeout,
                                  args.specific_extra_args)
                if results_file:
                    failed_functions = parse_log(results_file)

            if retcode == 0:
                # Zero exit but failures in the log means the log and the
                # exit code disagree: treat like a crash and re-run fully.
                if failed_functions:
                    L.warning("The test executable returned success but the logfile"
                              f" contains FAIL for function: {failed_functions[0].func}")
                    continue
                sys.exit(0)  # PASS

            if len(failed_functions) == 0:
                # Non-zero exit but nothing failed in the log: also a
                # log/exit-code mismatch, re-run the whole test.
                L.warning("No failures listed in the XML test log!"
                          " Did the test CRASH right after all its testcases PASSed?")
                continue

            cant_rerun = [ f.func for f in failed_functions if f.func in NO_RERUN_FUNCTIONS ]
            if cant_rerun:
                L.warning(f"Failure detected in the special test function '{cant_rerun[0]}'"
                          " which can not be re-run individually")
                continue

            assert len(failed_functions) > 0 and retcode != 0
            break  # all is fine, goto re-running individual failed testcases
        except Exception as e:
            # Any exception here (unparsable/missing XML, subprocess error)
            # is treated as a probable crash; the loop retries or aborts.
            L.error("exception:%s %s", type(e).__name__, e)
            L.error("The test executable probably crashed, see above for details")

    if args.max_repeats == 0:
        sys.exit(2)  # Some tests failed but no re-runs were asked

    L.info("Some tests failed, will re-run at most %d times.\n",
           args.max_repeats)
    for what_failed in failed_functions:
        try:
            ret = rerun_failed_testcase(args.test_basename, args.testargs, args.log_dir,
                                        what_failed, args.max_repeats, args.passes_needed,
                                        dryrun=args.dry_run, timeout=args.timeout)
        except Exception as e:
            L.error("exception:%s %s", type(e).__name__, e)
            L.error("The testcase re-run probably crashed, giving up")
            sys.exit(3)  # Test re-run CRASH
        if not ret:
            sys.exit(2)  # Test re-run FAIL

    sys.exit(0)  # All testcase re-runs PASSed


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,49 @@
#!/usr/bin/env python3
# Copyright (C) 2022 The Qt Company Ltd.
# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
import os
import re
import sys
import logging as L
from subprocess import Popen, PIPE
# Thin testrunner that ignores failures in tests and only catches
# crashes or ASAN errors.
#
# It executes its arguments as a command line, and parses the stderr for the
# following regex:
# Matches the banner AddressSanitizer prints at the start of an error report.
detect_ASAN = re.compile(r"^==[0-9]+==ERROR: AddressSanitizer")

my_name = os.path.basename(sys.argv[0])
logging_format = my_name + " %(levelname)8s: %(message)s"
L.basicConfig(format=logging_format, level=L.DEBUG)

proc = None
# NOTE(review): sys.argv[1] raises IndexError when no arguments are given —
# presumably this wrapper is always invoked with a command line; confirm.
if sys.argv[1] == "-f":  # hidden option to parse pre-existing files
    f = open(sys.argv[2], "r", errors="ignore")
else:
    # Run the wrapped command, capturing only its stderr for scanning.
    proc = Popen(sys.argv[1:], stderr=PIPE, universal_newlines=True, errors="ignore")
    f = proc.stderr

issues_detected = False
for line in f:
    if proc:
        # We don't want the stderr of the subprocess to disappear, so print it.
        print(line, file=sys.stderr, end="", flush=True)
    if detect_ASAN.match(line):
        issues_detected = True
f.close()

if proc:
    proc.wait()
    rc = proc.returncode
    L.info("Test exit code was: %d", rc)
    # Exit codes outside 0..127 indicate the process died from a signal
    # (or otherwise abnormally): report it as a crash.
    if not ( 0 <= rc <= 127 ):
        L.error("Crash detected")
        exit(1)

# Ordinary test failures are deliberately ignored; only ASAN reports
# (and crashes, above) make this wrapper fail.
if issues_detected:
    L.error("ASAN issues detected")
    exit(1)

View File

@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Template qtestlib XML log used by the qt-testrunner testsuite.
     Each {{name_result}} placeholder is textually replaced with "pass"
     or "fail" by write_xml_log() before the file is handed to
     qt-testrunner for parsing. -->
<TestCase name="qt_mock_test">
<Environment>
    <QtVersion>MOCK</QtVersion>
    <QtBuild>MOCK</QtBuild>
    <QTestVersion>6.3.0</QTestVersion>
</Environment>
<TestFunction name="initTestCase">
<Incident type="{{initTestCase_result}}" file="" line="0" />
<Duration msecs="0.00004"/>
</TestFunction>
<TestFunction name="always_pass">
<Incident type="{{always_pass_result}}" file="" line="0" />
<Duration msecs="0.71704"/>
</TestFunction>
<TestFunction name="always_fail">
<Incident type="{{always_fail_result}}" file="" line="0" />
<Duration msecs="0.828272"/>
</TestFunction>
<TestFunction name="always_crash">
<Incident type="{{always_crash_result}}" file="" line="0" />
<Duration msecs="0.828272"/>
</TestFunction>
<TestFunction name="fail_then_pass">
<Incident type="{{fail_then_pass:2_result}}" file="" line="0">
    <DataTag><![CDATA[2]]></DataTag>
</Incident>
<Incident type="{{fail_then_pass:5_result}}" file="" line="0">
    <DataTag><![CDATA[5]]></DataTag>
</Incident>
<Incident type="{{fail_then_pass:6_result}}" file="" line="0">
    <DataTag><![CDATA[6]]></DataTag>
</Incident>
</TestFunction>
<Duration msecs="1904.9"/>
</TestCase>

View File

@ -0,0 +1,182 @@
#!/usr/bin/env python3
# Copyright (C) 2021 The Qt Company Ltd.
# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
# This is an artificial test, mimicking the Qt tests, for example tst_whatever.
# Its purpose is to assist in testing qt-testrunner.py.
#
# Mode A:
#
# If invoked with a test function argument, it runs that test function.
#
# Usage:
#
# $0 always_pass
# $0 always_fail
# $0 always_crash
# $0 fail_then_pass:N # where N is the number of failing runs before passing
#
# Needs environment variable:
# + QT_MOCK_TEST_STATE_FILE :: points to a unique filename, to be written
# for keeping the state of the fail_then_pass:N tests.
#
# Mode B:
#
# If invoked without any argument, it runs the tests listed in the
# variable QT_MOCK_TEST_FAIL_LIST. If variable is empty it just runs
# the always_pass test. It also understands qtestlib's `-o outfile.xml,xml`
# option for writing a mock testlog in a file. Requires environment variables:
# + QT_MOCK_TEST_STATE_FILE :: See above
# + QT_MOCK_TEST_XML_TEMPLATE_FILE :: may point to the template XML file
# located in the same source directory. Without this variable, the
# option `-o outfile.xml,xml` will be ignored.
# + QT_MOCK_TEST_RUN_LIST :: may contain a comma-separated list of tests
#   that should run.
import sys
import os
import traceback
from tst_testrunner import write_xml_log
MY_NAME = os.path.basename(sys.argv[0])

# Path of the failure-state file (from QT_MOCK_TEST_STATE_FILE); set in main().
STATE_FILE = None
# Contents of the XML template (from QT_MOCK_TEST_XML_TEMPLATE_FILE), if set.
XML_TEMPLATE = None
# Output file extracted from a "-o file.xml,xml" argument, if one was given.
XML_OUTPUT_FILE = None
def put_failure(test_name):
    """Record one failure of test_name by appending a line to the state file."""
    with open(STATE_FILE, "a") as state:
        print(test_name, file=state)
def get_failures(test_name):
    """Return how many failures of test_name the state file has recorded
    so far (0 when the state file does not exist yet)."""
    try:
        with open(STATE_FILE) as state:
            return sum(1 for line in state if line.strip() == test_name)
    except FileNotFoundError:
        return 0
# Only care about the XML log output file.
def parse_output_argument(a):
    """If the "-o" value requests xml format, remember its filename."""
    global XML_OUTPUT_FILE
    suffix = ",xml"
    if a.endswith(suffix):
        XML_OUTPUT_FILE = a[:-len(suffix)]
# Strip qtestlib specific arguments.
# Only care about the "-o ...,xml" argument.
def clean_cmdline():
    """Return sys.argv stripped of qtestlib options, leaving only testcase
    names. Options taking a value ("-o", "-maxwarnings") consume the next
    argument; "-o FILE,xml" is remembered via parse_output_argument().
    Verbosity flags are announced on stdout but not passed through."""
    kept = []
    argv_iter = iter(sys.argv[1:])  # argv[0] is the script itself, skip it
    for arg in argv_iter:
        if arg in ("-o", "-maxwarnings"):
            value = next(argv_iter, None)  # consume the option's value
            if arg == "-o" and value is not None:
                parse_output_argument(value)
        elif arg in ("-v1", "-v2", "-vs"):
            print("VERBOSE RUN")
            if "QT_LOGGING_RULES" in os.environ:
                print("Environment has QT_LOGGING_RULES:",
                      os.environ["QT_LOGGING_RULES"])
        else:
            kept.append(arg)
    return kept
def log_test(testcase, result,
             testsuite=MY_NAME.rpartition(".")[0]):
    """Print one qtestlib-style result line, e.g. "PASS   : suite::case()"."""
    print(f"{result:<7}: {testsuite}::{testcase}()")
# Return the exit code
def run_test(testname):
    """Mock-execute a single test function and return its exit code
    (0 pass, 1 fail, 130 crash), logging a qtestlib-style result line."""
    fixed_results = {
        "initTestCase": 1,  # specifically test that initTestCase fails
        "always_pass": 0,
        "always_fail": 1,
        "always_crash": 130,
    }
    if testname in fixed_results:
        exit_code = fixed_results[testname]
    elif testname.startswith("fail_then_pass"):
        # Fail until N failures have been recorded in the state file,
        # then start passing.
        wanted_fails = int(testname.partition(":")[2])
        if get_failures(testname) < wanted_fails:
            put_failure(testname)
            exit_code = 1
        else:
            exit_code = 0
    else:
        assert False, "Unknown argument: %s" % testname

    if exit_code == 0:
        log_test(testname, "PASS")
    elif exit_code == 1:
        log_test(testname, "FAIL!")
    else:
        log_test(testname, "CRASH!")
    return exit_code
def no_args_run():
    """Run every test named in QT_MOCK_TEST_RUN_LIST (default: always_pass)
    and exit: 130 on the first crash, 1 if any test failed, 0 otherwise.
    Writes the mock XML log when both template and output file are known."""
    run_list = os.environ.get("QT_MOCK_TEST_RUN_LIST", "always_pass").split(",")

    fail_list = []
    for test in run_list:
        code = run_test(test)
        if code not in (0, 1):
            sys.exit(130)  # CRASH!
        if code != 0:
            fail_list.append(test)

    if XML_TEMPLATE and XML_OUTPUT_FILE:
        write_xml_log(XML_OUTPUT_FILE, failure=fail_list)

    sys.exit(1 if fail_list else 0)
def main():
    """Entry point: load config from the environment, then dispatch between
    single-test mode (one argument) and run-list mode (no arguments)."""
    global STATE_FILE
    # Will fail if env var is not set.
    STATE_FILE = os.environ["QT_MOCK_TEST_STATE_FILE"]

    global XML_TEMPLATE
    template_file = os.environ.get("QT_MOCK_TEST_XML_TEMPLATE_FILE")
    if template_file is not None:
        with open(template_file) as f:
            XML_TEMPLATE = f.read()

    args = clean_cmdline()
    if args:
        sys.exit(run_test(args[0]))
    no_args_run()
    assert False, "Unreachable!"

# TODO write XPASS test that does exit(1)
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Any unexpected exception is a bug in this mock, not a test result:
        # report it with a distinctive exit code.
        traceback.print_exc()
        exit(128)  # Something went wrong with this script

View File

@ -0,0 +1,303 @@
#!/usr/bin/env python3
# Copyright (C) 2021 The Qt Company Ltd.
# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
import sys
import os
import re
import subprocess
from subprocess import STDOUT, PIPE
from tempfile import TemporaryDirectory, mkstemp
MY_NAME = os.path.basename(__file__)
my_dir = os.path.dirname(__file__)
# Paths to the script under test and its helper files, relative to this file.
testrunner = os.path.join(my_dir, "..", "qt-testrunner.py")
mock_test = os.path.join(my_dir, "qt_mock_test.py")
xml_log_template = os.path.join(my_dir, "qt_mock_test-log.xml")
# Read the XML log template once at import time; write_xml_log() instantiates it.
with open(xml_log_template) as f:
    XML_TEMPLATE = f.read()

import unittest
def setUpModule():
    """Create the shared temporary directory and export the environment
    variables that qt_mock_test.py requires (state file + XML template)."""
    global TEMPDIR
    TEMPDIR = TemporaryDirectory(prefix="tst_testrunner-")

    state_file = os.path.join(TEMPDIR.name, "file_1")
    print("setUpModule(): setting up temporary directory and env var"
          " QT_MOCK_TEST_STATE_FILE=" + state_file + " and"
          " QT_MOCK_TEST_XML_TEMPLATE_FILE=" + xml_log_template)
    os.environ["QT_MOCK_TEST_STATE_FILE"] = state_file
    os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = xml_log_template
def tearDownModule():
    """Drop the state-file env var and remove the temporary directory."""
    print("\ntearDownModule(): Cleaning up temporary directory:",
          TEMPDIR.name)
    os.environ.pop("QT_MOCK_TEST_STATE_FILE")
    TEMPDIR.cleanup()
# Helper to run a command and always capture output
def run(*args, **kwargs):
    """Run a command via subprocess.run(), merging stderr into stdout.

    Echoes the command line and its output when the module-level DEBUG
    flag is set. DEBUG is only assigned inside the __main__ block, so we
    look it up defensively: importing this module and calling run() used
    to raise NameError.
    """
    debug = globals().get("DEBUG", False)
    if debug:
        print("Running: ", args, flush=True)
    proc = subprocess.run(*args, stdout=PIPE, stderr=STDOUT, **kwargs)
    if debug and proc.stdout:
        print(proc.stdout.decode(), flush=True)
    return proc
# Helper to run qt-testrunner.py with proper testing arguments.
def run_testrunner(xml_filename=None, extra_args=None, env=None):
    """Invoke qt-testrunner on the mock test, optionally in
    --parse-xml-testlog mode, and return the CompletedProcess."""
    cmd = [testrunner, mock_test]
    if xml_filename:
        cmd.extend(["--parse-xml-testlog", xml_filename])
    cmd.extend(extra_args or [])
    return run(cmd, env=env)
# Write the XML_TEMPLATE to filename, replacing the templated results.
def write_xml_log(filename, failure=None):
    """Instantiate the XML log template and write it to filename.

    Every testcase named in `failure` (a string, or a list/tuple of
    strings) is marked "fail"; all remaining placeholders become "pass"."""
    if type(failure) in (list, tuple):
        failing = list(failure)
    elif type(failure) is str:
        failing = [failure]
    else:
        failing = []

    data = XML_TEMPLATE
    for template in failing:
        data = data.replace("{{" + template + "_result}}", "fail")
    # Whatever placeholder remains turns into a "pass".
    data = re.sub(r"{{[^}]+}}", "pass", data)

    with open(filename, "w") as f:
        f.write(data)
# Test that qt_mock_test.py behaves well. This is necessary to properly
# test qt-testrunner.
class Test_qt_mock_test(unittest.TestCase):
    """Sanity checks of the mock test executable itself (not qt-testrunner)."""
    def setUp(self):
        # Start every test with a clean state file so fail_then_pass
        # counters do not leak between tests.
        state_file = os.environ["QT_MOCK_TEST_STATE_FILE"]
        if os.path.exists(state_file):
            os.remove(state_file)
    def test_always_pass(self):
        proc = run([mock_test, "always_pass"])
        self.assertEqual(proc.returncode, 0)
    def test_always_fail(self):
        proc = run([mock_test, "always_fail"])
        self.assertEqual(proc.returncode, 1)
    def test_fail_then_pass_2(self):
        # Must fail exactly twice, then pass.
        proc = run([mock_test, "fail_then_pass:2"])
        self.assertEqual(proc.returncode, 1)
        proc = run([mock_test, "fail_then_pass:2"])
        self.assertEqual(proc.returncode, 1)
        proc = run([mock_test, "fail_then_pass:2"])
        self.assertEqual(proc.returncode, 0)
    def test_fail_then_pass_1(self):
        proc = run([mock_test, "fail_then_pass:1"])
        self.assertEqual(proc.returncode, 1)
        proc = run([mock_test, "fail_then_pass:1"])
        self.assertEqual(proc.returncode, 0)
    def test_fail_then_pass_many_tests(self):
        # Counters for different fail_then_pass:N tags are independent.
        proc = run([mock_test, "fail_then_pass:1"])
        self.assertEqual(proc.returncode, 1)
        proc = run([mock_test, "fail_then_pass:2"])
        self.assertEqual(proc.returncode, 1)
        proc = run([mock_test, "fail_then_pass:1"])
        self.assertEqual(proc.returncode, 0)
        proc = run([mock_test, "fail_then_pass:2"])
        self.assertEqual(proc.returncode, 1)
        proc = run([mock_test, "fail_then_pass:2"])
        self.assertEqual(proc.returncode, 0)
    def test_xml_file_is_written(self):
        # The "-o file,xml" option must produce a non-empty XML log.
        filename = os.path.join(TEMPDIR.name, "testlog.xml")
        proc = run([mock_test, "-o", filename+",xml"])
        self.assertEqual(proc.returncode, 0)
        self.assertTrue(os.path.exists(filename))
        self.assertGreater(os.path.getsize(filename), 0)
        os.remove(filename)
# Test regular invocations of qt-testrunner.
class Test_testrunner(unittest.TestCase):
    """End-to-end tests: qt-testrunner actually executes qt_mock_test.py."""
    def setUp(self):
        # Clean both the mock test's state file and any XML log left over
        # from a previous test, then prepare a minimal environment.
        state_file = os.environ["QT_MOCK_TEST_STATE_FILE"]
        if os.path.exists(state_file):
            os.remove(state_file)
        old_logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
        if os.path.exists(old_logfile):
            os.remove(old_logfile)
        self.env = dict()
        self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
        self.env["QT_MOCK_TEST_STATE_FILE"] = state_file
        self.extra_args = [ "--log-dir", TEMPDIR.name ]
    def prepare_env(self, run_list=None):
        # Tell qt_mock_test.py which mock testcases to run.
        if run_list is not None:
            self.env['QT_MOCK_TEST_RUN_LIST'] = ",".join(run_list)
    def run2(self):
        # Shorthand: run qt-testrunner with this test's args and env.
        return run_testrunner(extra_args=self.extra_args, env=self.env)
    def test_simple_invocation(self):
        # All tests pass.
        proc = self.run2()
        self.assertEqual(proc.returncode, 0)
    def test_always_pass(self):
        self.prepare_env(run_list=["always_pass"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 0)
    def test_always_fail(self):
        self.prepare_env(run_list=["always_fail"])
        proc = self.run2()
        # TODO verify that re-runs==max_repeats
        self.assertEqual(proc.returncode, 2)
    def test_flaky_pass_1(self):
        self.prepare_env(run_list=["always_pass,fail_then_pass:1"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 0)
    def test_flaky_pass_5(self):
        # 5 failures is still within the default --max-repeats 5.
        self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:5"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 0)
    def test_flaky_fail(self):
        # 6 failures exceeds the default --max-repeats 5, so overall FAIL.
        self.prepare_env(run_list=["always_pass,fail_then_pass:6"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 2)
    def test_flaky_pass_fail(self):
        self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:6"])
        proc = self.run2()
        # TODO verify that one func was re-run and passed but the other failed.
        self.assertEqual(proc.returncode, 2)
    def test_initTestCase_fail_crash(self):
        # initTestCase is in NO_RERUN_FUNCTIONS, so failing it is fatal (3).
        self.prepare_env(run_list=["initTestCase,always_pass"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 3)
    # If no XML file is found by qt-testrunner, it is usually considered a
    # CRASH and the whole test is re-run. Even when the return code is zero.
    # It is a PASS only if the test is not capable of XML output (see no_extra_args, TODO test it).
    def test_no_xml_log_written_pass_crash(self):
        del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
        self.prepare_env(run_list=["always_pass"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 3)
    # On the 2nd iteration of the full test, both of the tests pass.
    # Still it's a CRASH because no XML file was found.
    def test_no_xml_log_written_fail_then_pass_crash(self):
        del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
        self.prepare_env(run_list=["always_pass,fail_then_pass:1"])
        proc = self.run2()
        # TODO verify that the whole test has run twice.
        self.assertEqual(proc.returncode, 3)
    # Even after 2 iterations of the full test we still get failures but no XML file,
    # and this is considered a CRASH.
    def test_no_xml_log_written_crash(self):
        del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
        self.prepare_env(run_list=["fail_then_pass:2"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 3)
    # If a test returns success but XML contains failures, it's a CRASH.
    def test_wrong_xml_log_written_1_crash(self):
        logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
        write_xml_log(logfile, failure="always_fail")
        del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
        self.prepare_env(run_list=["always_pass"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 3)
    # If a test returns failure but XML contains only pass, it's a CRASH.
    def test_wrong_xml_log_written_2_crash(self):
        logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml")
        write_xml_log(logfile)
        del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"]
        self.prepare_env(run_list=["always_fail"])
        proc = self.run2()
        self.assertEqual(proc.returncode, 3)
# Test qt-testrunner script with an existing XML log file:
#   qt-testrunner.py qt_mock_test.py --parse-xml-testlog file.xml
# qt-testrunner should repeat the testcases that are logged as
# failures and fail or pass depending on how the testcases behave.
# Different XML files are generated for the following test cases.
# + No failure logged. qt-testrunner should exit(0)
# + The "always_pass" test has failed. qt-testrunner should exit(0).
# + The "always_fail" test has failed. qt-testrunner should exit(2).
# + The "always_crash" test has failed. qt-testrunner should exit(2).
# + The "fail_then_pass:2" test failed. qt-testrunner should exit(0).
# + The "fail_then_pass:5" test failed. qt-testrunner should exit(2).
# + The "initTestCase" failed which is listed as NO_RERUN thus
#   qt-testrunner should exit(3).
class Test_testrunner_with_xml_logfile(unittest.TestCase):
    # Runs before every single test function, creating a unique temp file.
    def setUp(self):
        (_handle, self.xml_file) = mkstemp(
            suffix=".xml", prefix="qt_mock_test-log-",
            dir=TEMPDIR.name)
        # Clean the mock test's failure counters between tests.
        if os.path.exists(os.environ["QT_MOCK_TEST_STATE_FILE"]):
            os.remove(os.environ["QT_MOCK_TEST_STATE_FILE"])
    def tearDown(self):
        os.remove(self.xml_file)
        del self.xml_file
    def test_no_failure(self):
        write_xml_log(self.xml_file, failure=None)
        proc = run_testrunner(self.xml_file)
        self.assertEqual(proc.returncode, 0)
    def test_always_pass_failed(self):
        # Logged as failed, but passes on re-run: overall flaky PASS.
        write_xml_log(self.xml_file, failure="always_pass")
        proc = run_testrunner(self.xml_file)
        self.assertEqual(proc.returncode, 0)
    def test_always_pass_failed_max_repeats_0(self):
        # With re-runs disabled, the logged failure stands: overall FAIL.
        write_xml_log(self.xml_file, failure="always_pass")
        proc = run_testrunner(self.xml_file,
                              extra_args=["--max-repeats", "0"])
        self.assertEqual(proc.returncode, 2)
    def test_always_fail_failed(self):
        write_xml_log(self.xml_file, failure="always_fail")
        proc = run_testrunner(self.xml_file)
        self.assertEqual(proc.returncode, 2)
        # Assert that one of the re-runs was in verbose mode
        matches = re.findall("VERBOSE RUN",
                             proc.stdout.decode())
        self.assertEqual(len(matches), 1)
        # Assert that the environment was altered too
        self.assertIn("QT_LOGGING_RULES", proc.stdout.decode())
    def test_always_crash_failed(self):
        write_xml_log(self.xml_file, failure="always_crash")
        proc = run_testrunner(self.xml_file)
        self.assertEqual(proc.returncode, 2)
    def test_fail_then_pass_2_failed(self):
        write_xml_log(self.xml_file, failure="fail_then_pass:2")
        proc = run_testrunner(self.xml_file)
        self.assertEqual(proc.returncode, 0)
    def test_fail_then_pass_5_failed(self):
        write_xml_log(self.xml_file, failure="fail_then_pass:5")
        proc = run_testrunner(self.xml_file)
        self.assertEqual(proc.returncode, 2)
    def test_with_two_failures(self):
        write_xml_log(self.xml_file,
                      failure=["always_pass", "fail_then_pass:2"])
        proc = run_testrunner(self.xml_file)
        self.assertEqual(proc.returncode, 0)
        # Check that test output is properly interleaved with qt-testrunner's logging.
        matches = re.findall(r"(PASS|FAIL!).*\n.*Test process exited with code",
                             proc.stdout.decode())
        self.assertEqual(len(matches), 4)
    def test_initTestCase_fail_crash(self):
        write_xml_log(self.xml_file, failure="initTestCase")
        proc = run_testrunner(self.xml_file)
        self.assertEqual(proc.returncode, 3)
if __name__ == "__main__":
    # Hidden --debug flag: echo every wrapped command and its output.
    DEBUG = False
    if "--debug" in sys.argv:
        sys.argv.remove("--debug")
        DEBUG = True
    # We set failfast=True as we do not want the test suite to continue if the
    # tests of qt_mock_test failed. The next ones depend on it.
    unittest.main(failfast=True)