=== added file 'lava_dispatcher/actions/lava_test_shell.py'
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+
+# Copyright (C) 2012 Linaro Limited
+#
+# Author: Andy Doan <andy.doan@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses>.
+
+import json
+import logging
+import os
+import pexpect
+import shutil
+import stat
+import subprocess
+import tempfile
+
+import lava_dispatcher.lava_test_shell as lava_test_shell
+import lava_dispatcher.utils as utils
+
+from lava_dispatcher.actions import BaseAction
+from lava_dispatcher.device.target import Target
+from lava_dispatcher.downloader import download_image
+
+LAVA_TEST_DIR = '%s/../../lava_test_shell' % os.path.dirname(__file__)
+LAVA_TEST_ANDROID = '%s/lava-test-runner-android' % LAVA_TEST_DIR
+LAVA_TEST_UBUNTU = '%s/lava-test-runner-ubuntu' % LAVA_TEST_DIR
+LAVA_TEST_UPSTART = '%s/lava-test-runner.conf' % LAVA_TEST_DIR
+LAVA_TEST_SHELL = '%s/lava-test-shell' % LAVA_TEST_DIR
+
+Target.android_deployment_data['lava_test_runner'] = LAVA_TEST_ANDROID
+Target.android_deployment_data['lava_test_shell'] = LAVA_TEST_SHELL
+Target.android_deployment_data['lava_test_sh_cmd'] = '/system/bin/mksh'
+Target.android_deployment_data['lava_test_dir'] = '/system/lava'
+Target.android_deployment_data['lava_test_results_part_attr'] = 'data_part_android_org'
+Target.ubuntu_deployment_data['lava_test_runner'] = LAVA_TEST_UBUNTU
+Target.ubuntu_deployment_data['lava_test_shell'] = LAVA_TEST_SHELL
+Target.ubuntu_deployment_data['lava_test_sh_cmd'] = '/bin/sh'
+Target.ubuntu_deployment_data['lava_test_dir'] = '/lava'
+Target.ubuntu_deployment_data['lava_test_results_part_attr'] = 'root_part'
+
+
+def _configure_ubuntu_startup(etcdir):
+ logging.info('adding ubuntu upstart job')
+ shutil.copy(LAVA_TEST_UPSTART, '%s/init/' % etcdir)
+
+Target.ubuntu_deployment_data['lava_test_configure_startup'] = \
+ _configure_ubuntu_startup
+
+
+def _configure_android_startup(etcdir):
+ logging.info('hacking android start up job')
+ with open('%s/mkshrc' % etcdir, 'a') as f:
+ f.write('\n/system/lava/bin/lava-test-runner\n')
+
+Target.android_deployment_data['lava_test_configure_startup'] = \
+ _configure_android_startup
+
+
+class cmd_lava_test_shell(BaseAction):
+
+ parameters_schema = {
+ 'type': 'object',
+ 'properties': {
+ 'testdef_urls': {'type': 'array', 'items': {'type': 'string'}},
+ 'timeout': {'type': 'integer', 'optional': True},
+ },
+ 'additionalProperties': False,
+ }
+
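+    # a hypothetical job action that this schema accepts (the URL is
+    # illustrative):
+    #
+    #   {
+    #       "command": "lava_test_shell",
+    #       "parameters": {
+    #           "testdef_urls": ["http://example.com/testdef.json"],
+    #           "timeout": 1800
+    #       }
+    #   }
+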
+ def run(self, testdef_urls, timeout=-1):
+ target = self.client.target_device
+ self._assert_target(target)
+
+ self._configure_target(target, testdef_urls)
+
+ with target.runner() as runner:
+ patterns = [
+ '<LAVA_TEST_RUNNER>: exiting',
+ pexpect.EOF,
+ pexpect.TIMEOUT,
+ ]
+ idx = runner._connection.expect(patterns, timeout=timeout)
+ if idx == 0:
+ logging.info('lava_test_shell seems to have completed')
+ elif idx == 1:
+ logging.warn('lava_test_shell connection dropped')
+ elif idx == 2:
+ logging.warn('lava_test_shell has timed out')
+
+ self._bundle_results(target)
+
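+    # a minimal sketch of the test definition JSON this action consumes;
+    # "install" and "parse" are optional, and all values here are
+    # illustrative:
+    #
+    #   {
+    #       "test_id": "smoke-tests",
+    #       "install": {
+    #           "deps": ["gcc"],
+    #           "git-repos": ["git://example.com/tests.git"],
+    #           "steps": ["cd tests && make"]
+    #       },
+    #       "run": {"steps": ["./tests/run-tests.sh"]},
+    #       "parse": {
+    #           "pattern": "^(?P<test_case_id>\\w+):\\s+(?P<result>\\w+)$"
+    #       }
+    #   }
+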
+ def _get_test_definition(self, testdef_url, tmpdir):
+ testdef_file = download_image(testdef_url, self.context, tmpdir)
+ with open(testdef_file, 'r') as f:
+ logging.info('loading test definition')
+ return json.load(f)
+
+ def _copy_runner(self, mntdir, target):
+ xmod = (stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP |
+ stat.S_IXOTH | stat.S_IROTH)
+ runner = target.deployment_data['lava_test_runner']
+ shell = target.deployment_data['lava_test_shell']
+ shutil.copy(runner, '%s/bin/lava-test-runner' % mntdir)
+ with open(shell, 'r') as fin:
+ with open('%s/bin/lava-test-shell' % mntdir, 'w') as fout:
+ shcmd = target.deployment_data['lava_test_sh_cmd']
+ fout.write("#!%s\n\n" % shcmd)
+ fout.write(fin.read())
+ os.fchmod(fout.fileno(), xmod)
+
+ def _bzr_info(self, url, bzrdir):
+ cwd = os.getcwd()
+ try:
+            os.chdir(bzrdir)
+ revno = subprocess.check_output(['bzr', 'revno']).strip()
+ return {
+ 'project_name': bzrdir,
+ 'branch_vcs': 'bzr',
+ 'branch_revision': revno,
+ 'branch_url': url,
+ }
+ finally:
+ os.chdir(cwd)
+
+ def _git_info(self, url, gitdir):
+ cwd = os.getcwd()
+ try:
+            os.chdir(gitdir)
+ commit_id = subprocess.check_output(
+ ['git', 'log', '-1', '--pretty=%H']).strip()
+ return {
+ 'project_name': url.rsplit('/')[-1],
+ 'branch_vcs': 'git',
+ 'branch_revision': commit_id,
+ 'branch_url': url,
+ }
+ finally:
+ os.chdir(cwd)
+
+ def _create_repos(self, testdef, testdir):
+ cwd = os.getcwd()
+ try:
+ os.chdir(testdir)
+ for repo in testdef['install'].get('bzr-repos', []):
+ logging.info("bzr branch %s" % repo)
+ subprocess.check_call(['bzr', 'branch', repo])
+ name = repo.replace('lp:', '').split('/')[-1]
+ self._sw_sources.append(self._bzr_info(repo, name))
+ for repo in testdef['install'].get('git-repos', []):
+ logging.info("git clone %s" % repo)
+ subprocess.check_call(['git', 'clone', repo])
+ name = os.path.splitext(os.path.basename(repo))[0]
+ self._sw_sources.append(self._git_info(repo, name))
+ finally:
+ os.chdir(cwd)
+
+ def _create_target_install(self, testdef, hostdir, targetdir):
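+        # for a testdef with both deps and steps, the generated install.sh
+        # would look like (package and path are hypothetical):
+        #   set -ex
+        #   cd /lava/tests/0_smoke-tests
+        #   sudo apt-get update
+        #   sudo apt-get install -y gcc
+        #   cd tests && make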
+ with open('%s/install.sh' % hostdir, 'w') as f:
+ f.write('set -ex\n')
+ f.write('cd %s\n' % targetdir)
+
+ # TODO how should we handle this for Android?
+ if 'deps' in testdef['install']:
+ f.write('sudo apt-get update\n')
+ f.write('sudo apt-get install -y ')
+ for dep in testdef['install']['deps']:
+ f.write('%s ' % dep)
+ f.write('\n')
+
+ if 'steps' in testdef['install']:
+ for cmd in testdef['install']['steps']:
+ f.write('%s\n' % cmd)
+
+ def _copy_test(self, hostdir, targetdir, testdef):
+ self._sw_sources = []
+ utils.ensure_directory(hostdir)
+ with open('%s/testdef.json' % hostdir, 'w') as f:
+ f.write(json.dumps(testdef))
+
+ if 'install' in testdef:
+ self._create_repos(testdef, hostdir)
+ self._create_target_install(testdef, hostdir, targetdir)
+
+ with open('%s/run.sh' % hostdir, 'w') as f:
+ f.write('set -e\n')
+ f.write('cd %s\n' % targetdir)
+ for cmd in testdef['run']['steps']:
+ f.write('%s\n' % cmd)
+
+ def _mk_runner_dirs(self, mntdir):
+ utils.ensure_directory('%s/bin' % mntdir)
+ utils.ensure_directory_empty('%s/tests' % mntdir)
+
+ def _configure_target(self, target, testdef_urls):
+ ldir = target.deployment_data['lava_test_dir']
+
+ with target.file_system(target.config.root_part, 'lava') as d:
+ self._mk_runner_dirs(d)
+ self._copy_runner(d, target)
+ testdirs = []
+ for i, url in enumerate(testdef_urls):
+ testdef = self._get_test_definition(url, target.scratch_dir)
+                # android mounts the partition under /system, while ubuntu
+                # mounts it under /, so we use hdir for where it lives on
+                # the host and tdir for the path as the target will see it
+ hdir = '%s/tests/%d_%s' % (d, i, testdef['test_id'])
+ tdir = '%s/tests/%d_%s' % (ldir, i, testdef['test_id'])
+ self._copy_test(hdir, tdir, testdef)
+ testdirs.append(tdir)
+
+ with target.file_system(target.config.root_part, 'etc') as d:
+ target.deployment_data['lava_test_configure_startup'](d)
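+            # the conf consumed by the on-target lava-test-runner is just
+            # one test directory per line, e.g. (hypothetical):
+            #   /lava/tests/0_smoke-tests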
+ with open('%s/lava-test-runner.conf' % d, 'w') as f:
+ for testdir in testdirs:
+ f.write('%s\n' % testdir)
+
+ def _bundle_results(self, target):
+ """ Pulls the results from the target device and builds a bundle
+ """
+ results_part = target.deployment_data['lava_test_results_part_attr']
+ results_part = getattr(target.config, results_part)
+ rdir = self.context.host_result_dir
+
+ with target.file_system(results_part, 'lava/results') as d:
+ bundle = lava_test_shell.get_bundle(d, self._sw_sources)
+ utils.ensure_directory_empty(d)
+
+ (fd, name) = tempfile.mkstemp(
+ prefix='lava-test-shell', suffix='.bundle', dir=rdir)
+ with os.fdopen(fd, 'w') as f:
+ json.dump(bundle, f)
+
+ def _assert_target(self, target):
+ """ Ensure the target has the proper deployment data required by this
+ action. This allows us to exit the action early rather than going 75%
+ through the steps before discovering something required is missing
+ """
+ if not target.deployment_data:
+ raise RuntimeError('Target includes no deployment_data')
+
+ keys = ['lava_test_runner', 'lava_test_shell', 'lava_test_dir',
+ 'lava_test_configure_startup', 'lava_test_sh_cmd']
+ for k in keys:
+ if k not in target.deployment_data:
+ raise RuntimeError('Target deployment_data missing %s' % k)
=== modified file 'lava_dispatcher/config.py'
@@ -33,13 +33,13 @@
boot_cmds_oe = schema.StringOption(fatal=True) # And here?
boot_linaro_timeout = schema.IntOption(default=300)
boot_part = schema.IntOption(fatal=True)
- boot_part_android_org = schema.StringOption()
+ boot_part_android_org = schema.IntOption()
bootloader_prompt = schema.StringOption()
- cache_part_android_org = schema.StringOption()
+ cache_part_android_org = schema.IntOption()
client_type = schema.StringOption()
connection_command = schema.StringOption(fatal=True)
- data_part_android = schema.StringOption()
- data_part_android_org = schema.StringOption()
+ data_part_android = schema.IntOption()
+ data_part_android_org = schema.IntOption()
default_network_interface = schema.StringOption()
disablesuspend_timeout = schema.IntOption(default=240)
device_type = schema.StringOption(fatal=True)
@@ -57,11 +57,11 @@
qemu_machine_type = schema.StringOption()
reset_port_command = schema.StringOption()
root_part = schema.IntOption()
- sdcard_part_android = schema.StringOption()
- sdcard_part_android_org = schema.StringOption()
+ sdcard_part_android = schema.IntOption()
+ sdcard_part_android_org = schema.IntOption()
soft_boot_cmd = schema.StringOption(default="reboot")
- sys_part_android = schema.StringOption()
- sys_part_android_org = schema.StringOption()
+ sys_part_android = schema.IntOption()
+ sys_part_android_org = schema.IntOption()
tester_hostname = schema.StringOption(default="linaro")
tester_str = schema.StringOption()
val = schema.StringOption()
=== added file 'lava_dispatcher/lava_test_shell.py'
@@ -0,0 +1,210 @@
+# Copyright (C) 2011-2012 Linaro Limited
+#
+# Author: Andy Doan <andy.doan@linaro.org>
+#
+# This file is part of LAVA Dispatcher.
+#
+# LAVA Dispatcher is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# LAVA Dispatcher is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses>.
+
+import datetime
+import errno
+import json
+import logging
+import os
+import re
+
+from uuid import uuid4
+
+from lava_dispatcher.test_data import create_attachment
+
+
+def _get_cpus(cpuinfo):
+ devices = []
+ cpu_type = '?'
+ cpu_cores = 0
+ cpu_attrs = {}
+ board_type = '?'
+ board_rev = '?'
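+    # on ARM, /proc/cpuinfo looks roughly like this (illustrative sample):
+    #   Processor       : ARMv7 Processor rev 2 (v7l)
+    #   processor       : 0
+    #   Hardware        : OMAP4 Panda board
+    #   Revision        : 0020
+    # each 'processor' line counts as a core; unrecognized keys become
+    # cpu attributes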
+ for line in cpuinfo.split('\n'):
+ if len(line.strip()) == 0:
+ continue
+ (key, val) = line.split(':', 1)
+ key = key.strip()
+ val = val.strip()
+
+ if key == 'Processor':
+ cpu_type = val
+ elif key == 'processor':
+ cpu_cores += 1
+ elif key == 'Hardware':
+ board_type = val
+ elif key == 'Revision':
+ board_rev = val
+ else:
+ cpu_attrs[key] = val
+
+ cpu_attrs['cpu_type'] = cpu_type
+
+ for i in xrange(cpu_cores):
+ x = {
+ 'device_type': 'device.cpu',
+ 'description': 'Processor #%d' % i,
+ 'attributes': cpu_attrs
+ }
+ devices.append(x)
+
+ devices.append({
+ 'device_type': 'device.board',
+ 'description': board_type,
+ 'attributes': {'revision': board_rev}
+ })
+
+ return devices
+
+
+def _get_mem(meminfo):
+ for line in meminfo.split('\n'):
+ if line.startswith('MemTotal'):
+ (k, v) = line.split(':', 1)
+ return {
+ 'device_type': 'device.mem',
+ 'description': '%s of RAM' % v.strip(),
+ }
+
+ return None
+
+
+def _get_hw_context(cpuinfo, meminfo):
+ devices = []
+ if cpuinfo:
+ devices.extend(_get_cpus(cpuinfo))
+ if meminfo:
+ devices.append(_get_mem(meminfo))
+ return {'devices': devices}
+
+
+def _get_sw_context(build, pkgs, sw_sources):
+ ctx = {}
+ ctx['image'] = {'name': build}
+
+ pkglist = []
+ pattern = re.compile(
+ ("^\s*package:\s*(?P<package_name>[^:]+?)\s*:"
+ "\s*(?P<version>[^\s].+)\s*$"), re.M)
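+    # matches package lines as written by the on-target runners, e.g.:
+    #   package: bash : 4.2-0ubuntu4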
+ for line in pkgs.split('\n'):
+ match = pattern.search(line)
+ if match:
+ name, version = match.groups()
+ pkglist.append({'name': name.strip(), 'version': version})
+
+ ctx['packages'] = pkglist
+ ctx['sources'] = sw_sources
+ return ctx
+
+
+def _get_test_results(testdef, stdout):
+ results = []
+
+ pattern = re.compile(testdef['parse']['pattern'])
+
+ fixupdict = {}
+ if 'fixupdict' in testdef['parse']:
+ fixupdict = testdef['parse']['fixupdict']
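+
+    # e.g. with pattern '^(?P<test_case_id>\w+):\s+(?P<result>\w+)' and
+    # fixupdict {'PASS': 'pass'}, the line 'boot_test: PASS' parses to
+    # {'test_case_id': 'boot_test', 'result': 'pass'}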
+
+ for line in stdout.split('\n'):
+ match = pattern.match(line.strip())
+ if match:
+ res = match.groupdict()
+ if 'result' in res:
+ if res['result'] in fixupdict:
+ res['result'] = fixupdict[res['result']]
+ if res['result'] not in ('pass', 'fail', 'skip', 'unknown'):
+ logging.error('bad test result line: %s' % line.strip())
+ continue
+ results.append(res)
+
+ return results
+
+
+def _get_attachments(results_dir, dirname, testdef, stdout):
+ files = ('stderr.log', 'return_code', 'run.sh', 'install.sh')
+ attachments = []
+
+ attachments.append(create_attachment('stdout.txt', stdout))
+ attachments.append(create_attachment('testdef.json', testdef))
+
+ for f in files:
+ fname = '%s/%s' % (dirname, f)
+ buf = _get_content(results_dir, fname, ignore_errors=True)
+ if buf:
+ attachments.append(create_attachment(f, buf))
+
+ return attachments
+
+
+def _get_test_run(results_dir, dirname, hwcontext, swcontext):
+ now = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ testdef = _get_content(results_dir, '%s/testdef.json' % dirname)
+ stdout = _get_content(results_dir, '%s/stdout.log' % dirname)
+ attachments = _get_attachments(results_dir, dirname, testdef, stdout)
+
+ testdef = json.loads(testdef)
+
+ return {
+ 'test_id': testdef['test_id'],
+ 'analyzer_assigned_date': now,
+ 'analyzer_assigned_uuid': str(uuid4()),
+ 'time_check_performed': False,
+ 'test_results': _get_test_results(testdef, stdout),
+ 'software_context': swcontext,
+ 'hardware_context': hwcontext,
+ 'attachments': attachments,
+ }
+
+
+def _get_content(results_dir, fname, ignore_errors=False):
+ try:
+ with open(os.path.join(results_dir, fname), 'r') as f:
+ return f.read()
+ except IOError as e:
+ if e.errno != errno.ENOENT or not ignore_errors:
+ logging.exception('Error while reading %s' % fname)
+ if ignore_errors:
+ return ''
+
+
+def get_bundle(results_dir, sw_sources):
+ """
+ iterates through a results directory to build up a bundle formatted for
+ the LAVA dashboard
+ """
+ testruns = []
+ cpuinfo = _get_content(results_dir, './cpuinfo.txt', ignore_errors=True)
+ meminfo = _get_content(results_dir, './meminfo.txt', ignore_errors=True)
+ hwctx = _get_hw_context(cpuinfo, meminfo)
+
+ build = _get_content(results_dir, './build.txt')
+ pkginfo = _get_content(results_dir, './pkgs.txt', ignore_errors=True)
+ swctx = _get_sw_context(build, pkginfo, sw_sources)
+
+ for d in os.listdir(results_dir):
+ if os.path.isdir(os.path.join(results_dir, d)):
+ try:
+ testruns.append(_get_test_run(results_dir, d, hwctx, swctx))
+            except Exception:
+                logging.exception('error processing results for: %s' % d)
+
+ return {'test_runs': testruns, 'format': 'Dashboard Bundle Format 1.3'}
=== modified file 'lava_dispatcher/utils.py'
@@ -56,8 +56,8 @@
def mkdtemp(basedir='/tmp'):
- ''' returns a temporary directory that's deleted when the process exits
- '''
+ """ returns a temporary directory that's deleted when the process exits
+ """
d = tempfile.mkdtemp(dir=basedir)
atexit.register(shutil.rmtree, d)
@@ -66,12 +66,21 @@
def ensure_directory(path):
- ''' ensures the path exists, if it doesn't it will be created
- '''
+ """ ensures the path exists, if it doesn't it will be created
+ """
if not os.path.exists(path):
os.mkdir(path)
+
+
+def ensure_directory_empty(path):
+    """ Ensures the given directory path exists and is empty. It will
+    delete the directory's contents if needed.
+    """
+ if os.path.exists(path):
+ shutil.rmtree(path)
+ os.mkdir(path)
+
+
def url_to_cache(url, cachedir):
url_parts = urlparse.urlsplit(url)
path = os.path.join(cachedir, url_parts.netloc,
=== added directory 'lava_test_shell'
=== added file 'lava_test_shell/lava-test-runner-android'
@@ -0,0 +1,84 @@
+#!/system/bin/mksh
+
+LCK=${LCK-"/lava-test-runner.lck"}
+
+# make sure we are only run once
+if [ ! -f ${LCK} ] ; then
+ ( flock -n 9 || exit 1 ; true ) 9>${LCK}
+else
+ exit 0
+fi
+
+# make sure we log to serial console
+exec >/dev/console
+
+PREFIX="<LAVA_TEST_RUNNER>:"
+WORKFILE="/etc/lava-test-runner.conf"
+RESULTSDIR="/data/lava/results"
+BINDIR="/system/lava/bin"
+
+hwcontext()
+{
+ cpuinfo=${RESULTSDIR}/cpuinfo.txt
+ meminfo=${RESULTSDIR}/meminfo.txt
+
+ [ -f ${cpuinfo} ] || cat /proc/cpuinfo > ${cpuinfo}
+ [ -f ${meminfo} ] || cat /proc/meminfo > ${meminfo}
+}
+
+swcontext()
+{
+ build=${RESULTSDIR}/build.txt
+ pkgs=${RESULTSDIR}/pkgs.txt
+
+ [ -f ${build} ] || getprop ro.build.display.id > ${build}
+ [ -f ${pkgs} ] || pm list packages -v > ${pkgs}
+}
+
+# in background, since we don't have this working as a proper android service
+{
+ export PATH=${BINDIR}:${PATH}
+ echo "${PREFIX} started"
+ [ -d ${RESULTSDIR} ] || mkdir -p ${RESULTSDIR}
+
+ echo "${PREFIX} disabling suspend and waiting for home screen ..."
+ disablesuspend.sh
+
+ echo "${PREFIX} looking for installation work in ${WORKFILE}"
+ while read line ; do
+        # we don't have "basename" on android; this parameter expansion
+        # does the equivalent under mksh
+        testdir=${line%/} # trim off trailing slash if it exists
+        test=${testdir/*\//}
+        if [ -f ${line}/install.sh ] ; then
+            echo "${PREFIX} running ${test} installer ..."
+ /system/bin/sh ${line}/install.sh
+ if [ $? -ne 0 ] ; then
+ echo "${PREFIX} ${test} installer failed, exiting"
+ hwcontext
+ swcontext
+ exit 1
+ fi
+ fi
+ done < ${WORKFILE}
+
+ echo "${PREFIX} save hardware/software context info..."
+ hwcontext
+ swcontext
+
+ echo "${PREFIX} looking for work in ${WORKFILE}"
+ while read line ; do
+        # we don't have "basename" on android, but this parameter
+        # expansion does the equivalent under mksh
+        testdir=${line%/} # trim off trailing slash if it exists
+        test=${testdir/*\//}
+ echo "${PREFIX} running ${test} under lava-test-shell..."
+ odir=${RESULTSDIR}/${test}-`date +%s`
+ mkdir ${odir}
+ cp ${line}/testdef.json ${odir}/
+ cp ${line}/run.sh ${odir}/
+ [ -f ${line}/install.sh ] && cp ${line}/install.sh ${odir}/
+ lava-test-shell --output_dir ${odir} /system/bin/sh -e "${line}/run.sh"
+ echo "${PREFIX} ${test} exited with: `cat ${odir}/return_code`"
+ done < ${WORKFILE}
+ echo "${PREFIX} exiting"
+} &
+
=== added file 'lava_test_shell/lava-test-runner-ubuntu'
@@ -0,0 +1,67 @@
+#!/bin/sh
+
+# make sure we log to serial console
+exec >/dev/console
+
+PREFIX="<LAVA_TEST_RUNNER>:"
+WORKFILE="/etc/lava-test-runner.conf"
+RESULTSDIR="/lava/results"
+BINDIR="/lava/bin"
+
+hwcontext()
+{
+ cpuinfo=${RESULTSDIR}/cpuinfo.txt
+ meminfo=${RESULTSDIR}/meminfo.txt
+
+ [ -f ${cpuinfo} ] || cat /proc/cpuinfo > ${cpuinfo}
+ [ -f ${meminfo} ] || cat /proc/meminfo > ${meminfo}
+}
+
+swcontext()
+{
+ build=${RESULTSDIR}/build.txt
+ pkgs=${RESULTSDIR}/pkgs.txt
+
+    [ -f ${build} ] || grep DESCRIPTION /etc/lsb-release | cut -d\" -f2 > ${build}
+    # this queries the installed packages in a format similar to
+    # android's package list
+ [ -f ${pkgs} ] || dpkg-query -W -f '${status} ${package} : ${version}\n' | sed -n 's/^install ok installed/package:/p' > ${pkgs}
+}
+
+export PATH=${BINDIR}:${PATH}
+echo "${PREFIX} started"
+[ -d ${RESULTSDIR} ] || mkdir -p ${RESULTSDIR}
+
+echo "${PREFIX} looking for installation work in ${WORKFILE}"
+while read line ; do
+ test=`basename $line`
+ if [ -f ${line}/install.sh ] ; then
+ echo "${PREFIX} running ${test} installer ..."
+ /bin/sh ${line}/install.sh
+ if [ $? -ne 0 ] ; then
+ echo "${PREFIX} ${test} installer failed, exiting"
+ hwcontext
+ swcontext
+ exit 1
+ fi
+ fi
+done < ${WORKFILE}
+
+echo "${PREFIX} save hardware/software context info..."
+hwcontext
+swcontext
+
+echo "${PREFIX} looking for work in ${WORKFILE}"
+while read line ; do
+ test=`basename $line`
+ echo "${PREFIX} running ${test} under lava-test-shell..."
+ odir=${RESULTSDIR}/${test}-`date +%s`
+ mkdir ${odir}
+ cp ${line}/testdef.json ${odir}/
+ cp ${line}/run.sh ${odir}/
+ [ -f ${line}/install.sh ] && cp ${line}/install.sh ${odir}/
+ lava-test-shell --output_dir ${odir} /bin/sh -e "${line}/run.sh"
+ echo "${PREFIX} ${test} exited with: `cat ${odir}/return_code`"
+done < ${WORKFILE}
+echo "${PREFIX} exiting"
+
=== added file 'lava_test_shell/lava-test-runner.conf'
@@ -0,0 +1,12 @@
+# lava-test-runner
+#
+# This is an upstart job to start our lava-test-runner once the system is ready
+
+description "Launches the lava-test-runner at system start"
+author "Linaro Validation Team"
+
+start on (filesystem and runlevel [2])
+
+console output
+
+exec /lava/bin/lava-test-runner
=== added file 'lava_test_shell/lava-test-shell'
@@ -0,0 +1,12 @@
+# NOTE: the lava_test_shell action fills in the proper interpreter path
+# above this line during target deployment
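+#
+# lava-test-runner invokes this script as, e.g.:
+#   lava-test-shell --output_dir <odir> /bin/sh -e <testdir>/run.sh
+# so after the first shift below, $1 is the output directory and the
+# remaining arguments are the command to run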
+
+shift
+ODIR=$1
+shift
+TEST=$*
+RC=0
+{
+ $TEST
+ echo $? > ${ODIR}/return_code
+} 2>&1 | tee ${ODIR}/stdout.log
=== modified file 'setup.py'
@@ -27,6 +27,14 @@
'default-config/lava-dispatcher/devices/*.conf',
],
},
+ data_files=[
+ ('lava_test_shell', [
+ 'lava_test_shell/lava-test-runner-android',
+ 'lava_test_shell/lava-test-runner-ubuntu',
+ 'lava_test_shell/lava-test-runner.conf',
+ 'lava_test_shell/lava-test-shell',
+ ])
+ ],
install_requires=[
"json-schema-validator >= 2.3",
"lava-tool >= 0.4",