#!/bin/bash

# When running with "-v", the test itself runs in a pipeline with tee, and
# without pipefail we get the exit value from tee instead of from the test.
set -o pipefail

# The linuxcnc starter script sometimes tries to display X windows if
# DISPLAY is set.  We never want that while running tests, so unset it.
unset DISPLAY

# Some of our tests emit locale-sensitive strings, so reset the locale
# to a sane default.
export LC_ALL=C


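# Locate the directory this script lives in (whether it was invoked via an
# explicit path or found on $PATH) and, from that, the top of the source tree.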
case "$0" in
	*/*) MYDIR="${0%/*}" ;;
	*) MYDIR="$(type -path "$0")"; MYDIR="${MYDIR%/*}" ;;
esac
MYDIR=$(cd "$MYDIR"; pwd)
TOPDIR=$(cd "$MYDIR/.."; pwd)

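# Set up the run-in-place environment so the tests use the programs and
# libraries from this source tree.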
. $TOPDIR/scripts/rip-environment >/dev/null

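# Counters for the final summary: tests run, failures, and expected failures.
# VERBOSE is set by -v and makes the runners mirror test output to the terminal.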
NUM=0
FAIL=0; FAIL_NAMES=""
XFAIL=0
VERBOSE=0

clean () {
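    # Remove the stderr/result files left over in the given test directories.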
    find $* \( -name "stderr" -or -name "result" \) -print0 | xargs -0 rm -f
}

run_shell_script () {
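    # Run a test.sh under "bash -x" from its own directory, capturing stdout
    # in "result" and stderr (including the trace) in "stderr".  In verbose
    # mode the 3>&1 1>&2 2>&3 swap lets each stream be captured by its own
    # tee while both still reach the terminal.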
    testname=$(basename $1)
    testdir=$(dirname $1)

    pushd $testdir > /dev/null
    if [ $VERBOSE -eq 1 ]; then
        (bash -x $testname | tee result) 3>&1 1>&2 2>&3 | tee stderr
    else
        bash -x $testname > result 2> stderr
    fi
    exitcode=$?
    popd > /dev/null
    return $exitcode
}

run_executable () {
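    # Run a standalone "test" executable from its own directory, capturing
    # stdout in "result" and stderr in "stderr" (mirrored to the terminal in
    # verbose mode, as in run_shell_script).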
    testname=$(basename $1)
    testdir=$(dirname $1)

    pushd $testdir > /dev/null
    if [ $VERBOSE -eq 1 ]; then
        (./$testname | tee result) 3>&1 1>&2 2>&3 | tee stderr
    else
        ./$testname > result 2> stderr
    fi
    exitcode=$?
    popd > /dev/null
    return $exitcode
}

run_without_overruns () {
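    # Run a test.hal under halrun.  If the captured result contains a line
    # reading "overrun" (sampler reported a realtime overrun), rerun the
    # test, giving up after 10 attempts.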
    testname=$(basename $1)
    testdir=$(dirname $1)
    for i in $(seq 10); do
        if [ $i != 1 ]; then echo "--- $testdir: overrun detected in sampler, re-running test" 1>&2 ; fi

        pushd $testdir > /dev/null
        if [ $VERBOSE -eq 1 ]; then
            (halrun -f $testname | tee result) 3>&1 1>&2 2>&3 | tee stderr
        else
            halrun -f $testname > result 2> stderr
        fi
        exitcode=$?
        popd > /dev/null

        if ! grep -q '^overrun$' $testdir/result; then return $exitcode; fi
    done
    echo "--- $testdir: $i overruns detected, giving up" 1>&2
    return 1
}

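# Scratch directory for the test list and diff output, removed on exit and on
# common signals.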
TMPDIR=`mktemp -d /tmp/runtest.XXXXXX`
trap "rm -rf $TMPDIR" 0 1 2 3 15


run_tests () {
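    # Main driver: find every test.hal, test.sh, or test executable under the
    # given directories, run each one with the matching runner, judge its
    # output, and print a summary.  Exits nonzero if any test failed
    # unexpectedly.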
    find $* -name test.hal -or -name test.sh -or -name test \
	| sort > $TMPDIR/alltests

    while read testname; do
	testdir=$(dirname $testname)
	if [ -e $testdir/skip ]; then
	    if ! [ -x $testdir/skip ] || ! $testdir/skip; then
		echo "Skipping test: $testdir" 1>&2
		continue
	    fi
	fi
	NUM=$(($NUM+1))
	echo "Running test: $testdir" 1>&2
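        # Pick the runner that matches the test file's name.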
        case $testname in
        *.hal) run_without_overruns $testname ;;
        *.sh) run_shell_script $testname ;;
        *) run_executable $testname ;;
        esac
	exitcode=$?
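	# A test passes only if the runner exits 0 and its output is accepted
	# by a checkresult script or matches the "expected" file.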
	if [ $exitcode -ne 0 ]; then
	    reason="test run exited with $exitcode"
	else
	    if [ -e $testdir/checkresult ]; then
		$testdir/checkresult $testdir/result
		exitcode=$?
		reason="checkresult exited with $exitcode"
	    elif [ -f $testdir/expected ]; then
		cmp -s $testdir/expected $testdir/result
		exitcode=$?
		reason="result differed from expected"
		if [ $exitcode -ne 0 ]; then
		    diff -u $testdir/expected $testdir/result > $TMPDIR/diff
		    SIZE=$(wc -l < $TMPDIR/diff)
		    if [ $SIZE -lt 40 ]; then
			cat $TMPDIR/diff
		    else
			OMIT=$((SIZE-40))
			head -40 $TMPDIR/diff
			echo "($OMIT more lines omitted)"
		    fi
		fi
	    else
		exitcode=1
		reason="Neither expected nor checkresult existed"
	    fi
	fi
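	# An "xfail" file marks a test that is expected to fail: its failures
	# are tallied separately and a pass is reported as XPASS.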
	if [ $exitcode -ne 0 ]; then
	    if [ -f $testdir/xfail ]; then
		XFAIL=$(($XFAIL+1))
		echo "*** $testdir: XFAIL: $reason"
		if [ $NOCLEAN -eq 0 ]; then
		    rm -f $testdir/stderr $testdir/result
		fi
	    else
		FAIL=$(($FAIL+1))
		FAIL_NAMES="$FAIL_NAMES
	$testdir"
		echo "*** $testdir: FAIL: $reason"
	    fi
	else
	    if [ -f $testdir/xfail ]; then
		echo "*** $testdir: XPASS: Passed, but was expected to fail"
	    else
		if [ $NOCLEAN -eq 0 ]; then
		    rm -f $testdir/stderr $testdir/result
		fi
	    fi
	fi
    done < $TMPDIR/alltests

    SUCC=$((NUM-FAIL-XFAIL))
    echo "Runtest: $NUM tests run, $SUCC successful, $FAIL failed + $XFAIL expected"
    if [ $FAIL -ne 0 ]; then
	echo "Failed: $FAIL_NAMES"
	exit 1;
    else
	exit 0;
    fi
}

usage () {
    P=${0##*/}
    cat <<EOF
$P: Run HAL test suite items

Usage:
    $P [-n] tests
	Run tests.  With '-n', do not remove temporary files for successful
	tests.

    $P -c tests
	Remove temporary files from an earlier test run.

    $P -v tests
	Run tests, showing each test's stdout and stderr as it runs (normally
	they are hidden).

    $P -h
	Show this help message.
EOF
}

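# Option parsing: -c cleans up leftovers only, -n keeps temporary files for
# passing tests, -v mirrors test output to the terminal, -h prints usage.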
CLEAN_ONLY=0
NOCLEAN=0
while getopts cnvh opt; do
    case "$opt" in
    c) CLEAN_ONLY=1 ;;
    n) NOCLEAN=1 ;;
    v) VERBOSE=1 ;;
    h) usage; exit 0 ;;
    *) usage; exit 1 ;;
    esac
done
shift $((OPTIND-1))

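# With no test paths given, default to the current directory if it contains a
# test, otherwise to the whole tests/ tree.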
if [ $# -eq 0 ]; then
    if [ -f test.hal -o -f test.sh -o -f test ]; then
        set -- .
    else
        set -- $TOPDIR/tests
    fi
fi

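# Either just clean up after an earlier run (-c) or run the tests.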
if [ $CLEAN_ONLY -eq 1 ]; then
    clean "$@"
else
    run_tests "$@"
fi