=== modified file 'lava_dispatcher/actions/launch_control.py'
@@ -207,7 +207,7 @@
if not all_bundles:
main_bundle = {
"test_runs": [],
- "format": "Dashboard Bundle Format 1.3"
+ "format": "Dashboard Bundle Format 1.5"
}
else:
main_bundle = all_bundles.pop(0)
=== modified file 'lava_dispatcher/actions/lava_test_shell.py'
@@ -20,6 +20,68 @@
# along
# with this program; if not, see <http://www.gnu.org/licenses>.
+# LAVA Test Shell implementation details
+# ======================================
+#
+# The idea of lava-test-shell is that a YAML test definition is "compiled"
+# into a job that runs when the device under test boots; the output of this
+# job is then retrieved, analyzed and turned into a bundle of results.
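+#
+# A test definition might look roughly like this (a hypothetical sketch; the
+# key names shown are illustrative, the exact schema is defined by the
+# dispatcher's testdef handling, not by this comment):
+#
+#   install:
+#     steps:
+#       - ./configure
+#       - make
+#   run:
+#     steps:
+#       - make check
+#   parse:
+#     pattern: '(?P<test_case_id>.+):\s+(?P<result>(pass|fail|skip|unknown))'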
+#
+# In practice, this means that a hierarchy of directories and files is
+# created during test installation, a sub-hierarchy is created during
+# execution to hold the results, and the whole lot is poked at on the host
+# during analysis.
+#
+# On Ubuntu and OpenEmbedded the hierarchy is rooted at /lava. On Android,
+# / is mounted read-only, so the hierarchy is rooted at /data/lava instead.
+# Ubuntu paths are assumed from here on for simplicity.
+#
+# The directory tree that is created during installation looks like this:
+#
+# /lava/
+# bin/ This directory is put on the path when the
+# test code is running -- these binaries can
+# be viewed as a sort of device-side "API"
+# for test authors.
+# lava-test-runner The job that runs the tests on boot.
+# lava-test-shell A helper to run a test suite.
+# lava-test-case-attach A helper to attach a file to a test result.
+# tests/
+# ${IDX}_${TEST_ID}/ One directory per test to be executed.
+# testdef.yml The test definition.
+# install.sh The install steps.
+# run.sh The run steps.
+# [repos] The test definition can specify bzr or git
+# repositories to clone into this directory.
+#
+# In addition, a file /etc/lava-test-runner.conf is created containing the
+# names of the directories in /lava/tests/ to execute.
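+#
+# For example (illustrative directory names only), a job with two tests might
+# end up with a lava-test-runner.conf containing:
+#
+#   0_device-tree
+#   1_smoke-tests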
+#
+# During execution, the following files are created:
+#
+# /lava/
+# results/
+# cpuinfo.txt Hardware info.
+# meminfo.txt Ditto.
+# build.txt Software info.
+# pkgs.txt Ditto
+# ${IDX}_${TEST_ID}-${TIMESTAMP}/
+# testdef.yml Attached to the test run in the bundle for
+# archival purposes.
+# install.sh Ditto.
+# run.sh Ditto.
+# stdout.log The standard output of run.sh.
+# stderr.log The standard error of run.sh (actually not
+# created currently)
+# return_code The exit code of run.sh.
+# attachments/ Contains attachments for test results.
+# ${TEST_CASE_ID}/ Names the test result.
+# ${FILENAME} The attached data.
+# ${FILENAME}.mimetype The mime type of the attachment.
+#
+# After the test run has completed, the /lava/results directory is pulled over
+# to the host and turned into a bundle for submission to the dashboard.
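+#
+# (The host-side analysis lives in lava_dispatcher/lava_test_shell.py, which
+# walks the pulled results tree and builds the dashboard bundle.)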
+
import json
import yaml
import logging
@@ -43,21 +105,25 @@
LAVA_TEST_UPSTART = '%s/lava-test-runner.conf' % LAVA_TEST_DIR
LAVA_TEST_INITD = '%s/lava-test-runner.init.d' % LAVA_TEST_DIR
LAVA_TEST_SHELL = '%s/lava-test-shell' % LAVA_TEST_DIR
+LAVA_TEST_CASE_ATTACH = '%s/lava-test-case-attach' % LAVA_TEST_DIR
Target.android_deployment_data['lava_test_runner'] = LAVA_TEST_ANDROID
Target.android_deployment_data['lava_test_shell'] = LAVA_TEST_SHELL
+Target.android_deployment_data['lava_test_case_attach'] = LAVA_TEST_CASE_ATTACH
Target.android_deployment_data['lava_test_sh_cmd'] = '/system/bin/mksh'
Target.android_deployment_data['lava_test_dir'] = '/data/lava'
Target.android_deployment_data['lava_test_results_part_attr'] = 'data_part_android_org'
Target.ubuntu_deployment_data['lava_test_runner'] = LAVA_TEST_UBUNTU
Target.ubuntu_deployment_data['lava_test_shell'] = LAVA_TEST_SHELL
+Target.ubuntu_deployment_data['lava_test_case_attach'] = LAVA_TEST_CASE_ATTACH
Target.ubuntu_deployment_data['lava_test_sh_cmd'] = '/bin/sh'
Target.ubuntu_deployment_data['lava_test_dir'] = '/lava'
Target.ubuntu_deployment_data['lava_test_results_part_attr'] = 'root_part'
Target.oe_deployment_data['lava_test_runner'] = LAVA_TEST_UBUNTU
Target.oe_deployment_data['lava_test_shell'] = LAVA_TEST_SHELL
+Target.oe_deployment_data['lava_test_case_attach'] = LAVA_TEST_CASE_ATTACH
Target.oe_deployment_data['lava_test_sh_cmd'] = '/bin/sh'
Target.oe_deployment_data['lava_test_dir'] = '/lava'
Target.oe_deployment_data['lava_test_results_part_attr'] = 'root_part'
@@ -146,6 +212,13 @@
fout.write(fin.read())
os.fchmod(fout.fileno(), XMOD)
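+        # Install the lava-test-case-attach helper next to lava-test-runner
+        # and lava-test-shell, using the target's shell as its interpreter.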
+ tc = target.deployment_data['lava_test_case_attach']
+ with open(tc, 'r') as fin:
+ with open('%s/bin/lava-test-case-attach' % mntdir, 'w') as fout:
+ fout.write('#!%s\n\n' % shcmd)
+ fout.write(fin.read())
+ os.fchmod(fout.fileno(), XMOD)
+
def _bzr_info(self, url, bzrdir):
cwd = os.getcwd()
try:
=== modified file 'lava_dispatcher/lava_test_shell.py'
@@ -20,7 +20,7 @@
import datetime
import errno
-import json
+import mimetypes
import yaml
import logging
import os
@@ -115,7 +115,7 @@
return ctx
-def _get_test_results(testdef, stdout):
+def _get_test_results(testdef, stdout, attachments_dir):
results = []
fixupdict = {}
@@ -141,6 +141,24 @@
if res['result'] not in ('pass', 'fail', 'skip', 'unknown'):
logging.error('bad test result line: %s' % line.strip())
continue
+ tc_id = res.get('test_case_id')
+ if tc_id is not None:
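+                # lava-test-case-attach (run on the target) drops attachments
+                # in <attachments_dir>/<test_case_id>/; each file may have a
+                # companion <name>.mimetype file holding its MIME type.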
+ d = os.path.join(attachments_dir, tc_id)
+ if os.path.isdir(d):
+ attachments = os.listdir(d)
+ for filename in attachments:
+ if filename.endswith('.mimetype'):
+ continue
+ filepath = os.path.join(d, filename)
+ if os.path.exists(filepath + '.mimetype'):
+ mime_type = open(filepath + '.mimetype').read().strip()
+ else:
+ mime_type = mimetypes.guess_type(filepath)[0]
+ if mime_type is None:
+ mime_type = 'application/octet-stream'
+ attachment = create_attachment(filename, open(filepath).read(), mime_type)
+ res.setdefault('attachments', []).append(attachment)
+
results.append(res)
return results
@@ -169,6 +187,8 @@
stdout = _get_content(results_dir, '%s/stdout.log' % dirname)
attachments = _get_attachments(results_dir, dirname, testdef, stdout)
+ attachments_dir = os.path.join(results_dir, dirname, 'attachments')
+
testdef = yaml.load(testdef)
return {
@@ -176,7 +196,7 @@
'analyzer_assigned_date': now,
'analyzer_assigned_uuid': str(uuid4()),
'time_check_performed': False,
- 'test_results': _get_test_results(testdef, stdout),
+ 'test_results': _get_test_results(testdef, stdout, attachments_dir),
'software_context': swcontext,
'hardware_context': hwcontext,
'attachments': attachments,
@@ -215,4 +235,4 @@
except:
logging.exception('error processing results for: %s' % d)
- return {'test_runs': testruns, 'format': 'Dashboard Bundle Format 1.3'}
+ return {'test_runs': testruns, 'format': 'Dashboard Bundle Format 1.5'}
=== added file 'lava_test_shell/lava-test-case-attach'
@@ -0,0 +1,38 @@
+# NOTE: the lava_test_shell_action fills in the proper interpreter path
+# above during target deployment.
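+#
+# Usage: lava-test-case-attach TEST_CASE_ID FILE [MIMETYPE]
+#
+# Copies FILE into the per-test-case attachment area so that the host side
+# can attach it to the TEST_CASE_ID result when the bundle is assembled.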
+
+set -x
+
+# basename is not present on AOSP builds, but the /*\// thing does not
+# work with dash (Ubuntu builds) or busybox (OpenEmbedded). Both of
+# those have basename though.
+which basename >/dev/null || basename () { echo ${1/*\//}; }
+
+usage () {
+    echo "Usage: lava-test-case-attach TEST_CASE_ID FILE [MIMETYPE]"
+}
+
+TEST_CASE_ID="$1"
+FILE="$2"
+MIMETYPE="$3"
+# MIMETYPE is optional, but more than three arguments is an error.
+if [ $# -gt 3 ]; then
+    usage
+    exit 1
+fi
+if [ ! -f "$FILE" ]; then
+ usage
+ exit 1
+fi
+if [ -z "$TEST_CASE_ID" ]; then
+ usage
+ exit 1
+fi
+# $LAVA_ATTACHMENT_DIR is set by lava-test-shell
+mkdir -p "$LAVA_ATTACHMENT_DIR/$TEST_CASE_ID"
+cp "$FILE" "$LAVA_ATTACHMENT_DIR/$TEST_CASE_ID"
+if [ ! -z "$MIMETYPE" ]; then
+    echo "$MIMETYPE" > "$LAVA_ATTACHMENT_DIR/$TEST_CASE_ID/$(basename "$FILE").mimetype"
+fi
=== modified file 'lava_test_shell/lava-test-shell'
@@ -6,6 +6,7 @@
shift
TEST=$*
RC=0
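+# lava-test-case-attach drops files (and optional .mimetype companions) under
+# this directory; the host attaches them to the matching test results.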
+export LAVA_ATTACHMENT_DIR=${ODIR}/attachments
{
$TEST
echo $? > ${ODIR}/return_code