From d1a9ad599cbd517c47e2979d13c7ec354a93a0da Mon Sep 17 00:00:00 2001 From: Anton Iakimov Date: Tue, 21 Nov 2023 16:39:11 +0100 Subject: [PATCH] feat: detect flaky tests This reverts commit f12803ec95279d06243865630b2720145cf88478. --- Makefile | 1 + _assets/ci/Jenkinsfile | 1 + _assets/ci/Jenkinsfile.android | 1 + _assets/ci/Jenkinsfile.docker | 1 + _assets/ci/Jenkinsfile.ios | 1 + _assets/ci/Jenkinsfile.linux | 1 + _assets/ci/Jenkinsfile.tests | 34 ++++++++++++++++++++++++- _assets/scripts/run_unit_tests.sh | 42 +++++++++++++++++++++++++++---- _assets/scripts/test_stats.py | 36 ++++++++++++++++++++++++++ 9 files changed, 112 insertions(+), 6 deletions(-) create mode 100755 _assets/scripts/test_stats.py diff --git a/Makefile b/Makefile index ded6cd291..57a7e20a3 100644 --- a/Makefile +++ b/Makefile @@ -314,6 +314,7 @@ docker-test: ##@tests Run tests in a docker container with golang. test: test-unit ##@tests Run basic, short tests during development test-unit: export BUILD_TAGS ?= +test-unit: export UNIT_TEST_FAILFAST ?= true # Ensure 'waku' and 'wakuv2' tests are executed first to reduce the impact of flaky tests. # Otherwise, the entire target might fail at the end, making re-runs time-consuming. test-unit: export UNIT_TEST_PACKAGES ?= $(shell go list ./... | \ diff --git a/_assets/ci/Jenkinsfile b/_assets/ci/Jenkinsfile index 06c55c487..4c824a192 100644 --- a/_assets/ci/Jenkinsfile +++ b/_assets/ci/Jenkinsfile @@ -1,3 +1,4 @@ +#!/usr/bin/env groovy library 'status-jenkins-lib@v1.7.0' pipeline { diff --git a/_assets/ci/Jenkinsfile.android b/_assets/ci/Jenkinsfile.android index 4f751fbaf..a8ba9d5e7 100644 --- a/_assets/ci/Jenkinsfile.android +++ b/_assets/ci/Jenkinsfile.android @@ -1,3 +1,4 @@ +#!/usr/bin/env groovy library 'status-jenkins-lib@v1.7.0' pipeline { diff --git a/_assets/ci/Jenkinsfile.docker b/_assets/ci/Jenkinsfile.docker index 14fdec7bd..757b5fdb3 100644 --- a/_assets/ci/Jenkinsfile.docker +++ b/_assets/ci/Jenkinsfile.docker @@ -1,3 +1,4 @@ +#!/usr/bin/env groovy library 'status-jenkins-lib@v1.7.0' pipeline { diff --git a/_assets/ci/Jenkinsfile.ios b/_assets/ci/Jenkinsfile.ios index 4c114b8d2..ca129b9c2 100644 --- a/_assets/ci/Jenkinsfile.ios +++ b/_assets/ci/Jenkinsfile.ios @@ -1,3 +1,4 @@ +#!/usr/bin/env groovy library 'status-jenkins-lib@v1.7.0' pipeline { diff --git a/_assets/ci/Jenkinsfile.linux b/_assets/ci/Jenkinsfile.linux index 0f8133bad..0c9f8aa7f 100644 --- a/_assets/ci/Jenkinsfile.linux +++ b/_assets/ci/Jenkinsfile.linux @@ -1,3 +1,4 @@ +#!/usr/bin/env groovy library 'status-jenkins-lib@v1.7.0' pipeline { diff --git a/_assets/ci/Jenkinsfile.tests b/_assets/ci/Jenkinsfile.tests index 4f0e6faa8..6407387d2 100644 --- a/_assets/ci/Jenkinsfile.tests +++ b/_assets/ci/Jenkinsfile.tests @@ -1,3 +1,4 @@ +#!/usr/bin/env groovy library 'status-jenkins-lib@v1.7.0' pipeline { @@ -9,12 +10,22 @@ pipeline { defaultValue: 'develop', description: 'Name of branch to build.' ) + string( + name: 'UNIT_TEST_COUNT', + defaultValue: getDefaultUnitTestCount(), + description: 'How many times to run tests?' + ) + booleanParam( + name: 'UNIT_TEST_FAILFAST', + defaultValue: getDefaultUnitTestFailfast(), + description: 'Should the job fail fast on first test failure?' 
+ ) } options { timestamps() /* Prevent Jenkins jobs from running forever */ - timeout(time: 40, unit: 'MINUTES') + timeout(time: getDefaultTimeout(), unit: 'MINUTES') disableConcurrentBuilds() /* manage how many builds we keep */ buildDiscarder(logRotator( @@ -99,3 +110,24 @@ pipeline { cleanup { dir(env.TMPDIR) { deleteDir() } } } // post } // pipeline + +def getDefaultUnitTestCount() { + if (env.JOB_BASE_NAME == 'tests-nightly') { + return '10' + } + return '1' +} + +def getDefaultUnitTestFailfast() { + if (env.JOB_BASE_NAME == 'tests-nightly') { + return false + } + return true +} + +def getDefaultTimeout() { + if (env.JOB_BASE_NAME == 'tests-nightly') { + return 8*60 + } + return 40 +} diff --git a/_assets/scripts/run_unit_tests.sh b/_assets/scripts/run_unit_tests.sh index 27c3309ff..0537ecdd9 100755 --- a/_assets/scripts/run_unit_tests.sh +++ b/_assets/scripts/run_unit_tests.sh @@ -5,21 +5,53 @@ GIT_ROOT=$(cd "${BASH_SOURCE%/*}" && git rev-parse --show-toplevel) source "${GIT_ROOT}/_assets/scripts/colors.sh" +if [[ $UNIT_TEST_FAILFAST == 'true' ]]; then + GOTEST_EXTRAFLAGS="${GOTEST_EXTRAFLAGS} --failfast" +fi + +if [[ -z "${UNIT_TEST_COUNT}" ]]; then + UNIT_TEST_COUNT=1 +fi + +redirect_stdout() { + output_file=$1 + + if [[ "${CI}" == 'true' ]]; + then + cat > "${output_file}"; + else + tee "${output_file}"; + fi +} + +last_failing_exit_code=0 + for package in ${UNIT_TEST_PACKAGES}; do echo -e "${GRN}Testing:${RST} ${package}" package_dir=$(go list -f "{{.Dir}}" "${package}") output_file=${package_dir}/test.log - go test -tags "${BUILD_TAGS}" -timeout 30m -v -failfast "${package}" ${GOTEST_EXTRAFLAGS} | \ - if [ "${CI}" = "true" ]; then cat > "${output_file}"; else tee "${output_file}"; fi + go test -timeout 30m -count="${UNIT_TEST_COUNT}" -tags "${BUILD_TAGS}" -v "${package}" ${GOTEST_EXTRAFLAGS} | \ + redirect_stdout "${output_file}" go_test_exit=$? 
- if [ "${CI}" = "true" ]; then + if [[ "${CI}" == 'true' ]]; then go-junit-report -in "${output_file}" -out "${package_dir}"/report.xml fi - if [ ${go_test_exit} -ne 0 ]; then + if [[ "${go_test_exit}" -ne 0 ]]; then echo -e "${YLW}Failed, see the log:${RST} ${BLD}${output_file}${RST}" - exit "${go_test_exit}" + if [[ "$UNIT_TEST_FAILFAST" == 'true' ]]; then + exit "${go_test_exit}" + fi + last_failing_exit_code="${go_test_exit}" fi done + +if [[ "${last_failing_exit_code}" -ne 0 ]]; then + if [[ "${UNIT_TEST_COUNT}" -gt 1 ]]; then + "${GIT_ROOT}/_assets/scripts/test_stats.py" + fi + + exit "${last_failing_exit_code}" +fi diff --git a/_assets/scripts/test_stats.py b/_assets/scripts/test_stats.py new file mode 100755 index 000000000..b71d80105 --- /dev/null +++ b/_assets/scripts/test_stats.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +import glob +import xml.etree.ElementTree as ET +from collections import defaultdict + +test_stats = defaultdict(lambda: defaultdict(int)) + +for file in glob.glob("**/report.xml", recursive=True): + tree = ET.parse(file) + root = tree.getroot() + for testcase in root.iter("testcase"): + test_name = testcase.attrib["name"] + + test_stats[test_name]["total"] += 1 + + if testcase.find("failure") is not None: + test_stats[test_name]["failed"] += 1 + elif testcase.find("error") is not None: + test_stats[test_name]["failed"] += 1 + +failing_test_stats = [ + {"name": name, "failure_rate": stats["failed"] / stats["total"]} + for name, stats in test_stats.items() if stats["failed"] != 0 +] + +sorted_failing_test_stats = sorted(failing_test_stats, + key=lambda x: x["failure_rate"], + reverse=True) + +print("---") +print("Failing tests stats") +print("(test name: failure rate)") +print("---") +for test_stat in sorted_failing_test_stats: + print(f"{test_stat['name']}: {test_stat['failure_rate'] * 100}%")
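
How this fits together: the nightly job (JOB_BASE_NAME == 'tests-nightly') defaults to UNIT_TEST_COUNT=10 and UNIT_TEST_FAILFAST=false, so run_unit_tests.sh runs every package ten times, keeps going past failures, converts each package's test.log into report.xml via go-junit-report (CI builds only), and finally calls test_stats.py to print the failure rate of every test that failed at least once. Below is a minimal sketch for reproducing the same flow locally; it assumes the Makefile's test-unit target dispatches to _assets/scripts/run_unit_tests.sh (as the exported variables suggest) and that go-junit-report is installed, and it sets CI=true because report.xml files are only written on CI.

    # Run each unit-test package 5 times without stopping at the first failure;
    # CI=true makes the script write a per-package report.xml instead of tee-ing output.
    CI=true UNIT_TEST_COUNT=5 UNIT_TEST_FAILFAST=false make test-unit

    # run_unit_tests.sh already invokes test_stats.py when UNIT_TEST_COUNT > 1 and at
    # least one run failed; it can also be re-run by hand over the same report.xml tree.
    ./_assets/scripts/test_stats.py

test_stats.py globs **/report.xml recursively, counts failure and error entries per test case, and prints "test name: failure rate" sorted from flakiest to least flaky; tests that never failed are omitted from the output.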