diff mbox

[Branch,~linaro-validation/lava-dispatcher/trunk] Rev 685: This package is obsolete

Message ID 20130919175430.24808.81255.launchpad@ackee.canonical.com
State Accepted
Headers show

Commit Message

Antonio Terceiro Sept. 19, 2013, 5:54 p.m. UTC
------------------------------------------------------------
revno: 685
committer: Antonio Terceiro <antonio.terceiro@linaro.org>
branch nick: trunk
timestamp: Thu 2013-09-19 14:47:31 -0300
message:
  This package is obsolete
removed:
  COPYING
  MANIFEST.in
  README
  doc/
  doc/QUICKSTART
  doc/arm_energy_probe.rst
  doc/changes.rst
  doc/conf.py
  doc/configuration.rst
  doc/cu-ttyUSB0.cf
  doc/debugging.rst
  doc/examples/
  doc/examples/jobs/
  doc/examples/jobs/lava-android-test.json
  doc/examples/jobs/lava-ltp-job.json
  doc/examples/jobs/lava-out-of-tree-test-1.json
  doc/examples/jobs/lava-out-of-tree-test-2.json
  doc/examples/jobs/test_with_testoptions.json
  doc/examples/plugins/
  doc/examples/plugins/demo-action-plugin/
  doc/examples/plugins/demo-action-plugin/README
  doc/examples/plugins/demo-action-plugin/demo-action-plugin.json
  doc/examples/plugins/demo-action-plugin/demo_action_plugin/
  doc/examples/plugins/demo-action-plugin/demo_action_plugin/__init__.py
  doc/examples/plugins/demo-action-plugin/demo_action_plugin/foo.py
  doc/examples/plugins/demo-action-plugin/setup.py
  doc/external_measurement.rst
  doc/index.rst
  doc/jobfile-android.rst
  doc/jobfile-lmc.rst
  doc/jobfile-prebuilt.rst
  doc/jobfile.rst
  doc/lava_test_shell.rst
  doc/multinode-usecases.rst
  doc/multinode.rst
  doc/multinodeapi.rst
  doc/proxy.rst
  doc/sdmux.png
  doc/sdmux.rst
  doc/standalonesetup.rst
  doc/usage.rst
  doc/usecaseone.rst
  doc/usecasetwo.rst
  lava/
  lava-dispatch
  lava/__init__.py
  lava/dispatcher/
  lava/dispatcher/__init__.py
  lava/dispatcher/commands.py
  lava/dispatcher/node.py
  lava_dispatcher/
  lava_dispatcher/__init__.py
  lava_dispatcher/actions/
  lava_dispatcher/actions/__init__.py
  lava_dispatcher/actions/android_install_binaries.py
  lava_dispatcher/actions/android_install_cts_medias.py
  lava_dispatcher/actions/boot_control.py
  lava_dispatcher/actions/deploy.py
  lava_dispatcher/actions/launch_control.py
  lava_dispatcher/actions/lava_android_test.py
  lava_dispatcher/actions/lava_test.py
  lava_dispatcher/actions/lava_test_shell.py
  lava_dispatcher/actions/lmp/
  lava_dispatcher/actions/lmp/__init__.py
  lava_dispatcher/actions/lmp/board.py
  lava_dispatcher/actions/lmp/ethsata.py
  lava_dispatcher/actions/lmp/hdmi.py
  lava_dispatcher/actions/lmp/lsgpio.py
  lava_dispatcher/actions/lmp/sdmux.py
  lava_dispatcher/actions/lmp/usb.py
  lava_dispatcher/client/
  lava_dispatcher/client/__init__.py
  lava_dispatcher/client/base.py
  lava_dispatcher/client/lmc_utils.py
  lava_dispatcher/client/targetdevice.py
  lava_dispatcher/config.py
  lava_dispatcher/context.py
  lava_dispatcher/default-config/
  lava_dispatcher/default-config/lava-dispatcher/
  lava_dispatcher/default-config/lava-dispatcher/README
  lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/
  lava_dispatcher/default-config/lava-dispatcher/device-types/aa9.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/arndale.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/beagle-xm.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/capri.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/highbank.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/k3v2.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/keystone.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/kvm.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/mx51evk.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/mx53loco.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/nexus.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/nexus10.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/origen.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/panda.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/qemu.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_foundation-armv8.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-a15x1-a7x1.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-a15x4-a7x4.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-armv8.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/snowball.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/vexpress-tc2.conf
  lava_dispatcher/default-config/lava-dispatcher/device-types/vexpress.conf
  lava_dispatcher/default-config/lava-dispatcher/devices/
  lava_dispatcher/default-config/lava-dispatcher/lava-dispatcher.conf
  lava_dispatcher/device/
  lava_dispatcher/device/__init__.py
  lava_dispatcher/device/boot_options.py
  lava_dispatcher/device/bootloader.py
  lava_dispatcher/device/capri.py
  lava_dispatcher/device/fastboot.py
  lava_dispatcher/device/fastmodel.py
  lava_dispatcher/device/ipmi_pxe.py
  lava_dispatcher/device/k3v2.py
  lava_dispatcher/device/master.py
  lava_dispatcher/device/nexus10.py
  lava_dispatcher/device/qemu.py
  lava_dispatcher/device/sdmux.py
  lava_dispatcher/device/target.py
  lava_dispatcher/device/vexpress.py
  lava_dispatcher/downloader.py
  lava_dispatcher/errors.py
  lava_dispatcher/ipmi.py
  lava_dispatcher/job.py
  lava_dispatcher/lava_test_shell.py
  lava_dispatcher/signals/
  lava_dispatcher/signals/__init__.py
  lava_dispatcher/signals/armprobe.py
  lava_dispatcher/signals/duration.py
  lava_dispatcher/signals/shellhooks.py
  lava_dispatcher/tarballcache.py
  lava_dispatcher/test_data.py
  lava_dispatcher/tests/
  lava_dispatcher/tests/__init__.py
  lava_dispatcher/tests/helper.py
  lava_dispatcher/tests/test-config/
  lava_dispatcher/tests/test-config/bin/
  lava_dispatcher/tests/test-config/bin/fake-qemu
  lava_dispatcher/tests/test_config.py
  lava_dispatcher/tests/test_device_version.py
  lava_dispatcher/utils.py
  lava_test_shell/
  lava_test_shell/README
  lava_test_shell/distro/
  lava_test_shell/distro/android/
  lava_test_shell/distro/android/lava-test-runner
  lava_test_shell/distro/fedora/
  lava_test_shell/distro/fedora/lava-install-packages
  lava_test_shell/distro/fedora/lava-installed-packages
  lava_test_shell/distro/fedora/lava-os-build
  lava_test_shell/distro/ubuntu/
  lava_test_shell/distro/ubuntu/lava-install-packages
  lava_test_shell/distro/ubuntu/lava-installed-packages
  lava_test_shell/distro/ubuntu/lava-os-build
  lava_test_shell/lava-installed-packages
  lava_test_shell/lava-os-build
  lava_test_shell/lava-test-case
  lava_test_shell/lava-test-case-attach
  lava_test_shell/lava-test-run-attach
  lava_test_shell/lava-test-runner
  lava_test_shell/lava-test-shell
  lava_test_shell/multi_node/
  lava_test_shell/multi_node/lava-group
  lava_test_shell/multi_node/lava-multi-node.lib
  lava_test_shell/multi_node/lava-network
  lava_test_shell/multi_node/lava-role
  lava_test_shell/multi_node/lava-self
  lava_test_shell/multi_node/lava-send
  lava_test_shell/multi_node/lava-sync
  lava_test_shell/multi_node/lava-wait
  lava_test_shell/multi_node/lava-wait-all
  requirements.txt
  setup.cfg
  setup.py
added:
  README.obsolete.txt


--
lp:lava-dispatcher
https://code.launchpad.net/~linaro-validation/lava-dispatcher/trunk

You are subscribed to branch lp:lava-dispatcher.
To unsubscribe from this branch go to https://code.launchpad.net/~linaro-validation/lava-dispatcher/trunk/+edit-subscription
diff mbox

Patch

=== removed file 'COPYING'
--- COPYING	2011-06-23 18:28:42 +0000
+++ COPYING	1970-01-01 00:00:00 +0000
@@ -1,339 +0,0 @@ 
-		    GNU GENERAL PUBLIC LICENSE
-		       Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-			    Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Lesser General Public License instead.)  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
-
-  We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-  Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software.  If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-  Finally, any free program is threatened constantly by software
-patents.  We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary.  To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-		    GNU GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License.  The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language.  (Hereinafter, translation is included without limitation in
-the term "modification".)  Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
-  1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-  2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any
-    part thereof, to be licensed as a whole at no charge to all third
-    parties under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a
-    notice that there is no warranty (or else, saying that you provide
-    a warranty) and that users may redistribute the program under
-    these conditions, and telling the user how to view a copy of this
-    License.  (Exception: if the Program itself is interactive but
-    does not normally print such an announcement, your work based on
-    the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections
-    1 and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your
-    cost of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer
-    to distribute corresponding source code.  (This alternative is
-    allowed only for noncommercial distribution and only if you
-    received the program in object code or executable form with such
-    an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it.  For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable.  However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-  4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License.  Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-  5. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Program or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-  6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
-  7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded.  In such case, this License incorporates
-the limitation as if written in the body of this License.
-
-  9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation.  If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
-  10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission.  For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this.  Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
-			    NO WARRANTY
-
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
-		     END OF TERMS AND CONDITIONS
-
-	    How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc.,
-    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-  `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-  <signature of Ty Coon>, 1 April 1989
-  Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs.  If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.

=== removed file 'MANIFEST.in'
--- MANIFEST.in	2012-10-23 03:21:33 +0000
+++ MANIFEST.in	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-include README
-recursive-include lava_dispatcher/default-config *.conf
-include lava_test_shell/*

=== removed file 'README'
--- README	2012-03-13 01:35:51 +0000
+++ README	1970-01-01 00:00:00 +0000
@@ -1,26 +0,0 @@ 
-LAVA dispatcher - automated testing of Linaro images
-====================================================
-
-The LAVA dispatcher allows automated testing of Linaro images.
-
-Quickstart
-----------
-
-See doc/QUICKSTART.
-
-
-Android Debug Bridge (adb) installation
----------------------------------------
-
-To run android test job, adb is common to be used.
-
-For installing adb, you can download the SDK from 
-http://developer.android.com/sdk/index.html
-
-By default, the SDK files are unpacked into a directory named 
-android-sdk-<machine-platform>.
-
-You can find the adb tool in <sdk>/platform-tools/.
-
-You might want to add the location of the SDK's tools/ and platform-tools 
-to your PATH environment variable, to provide easy access to the tools.

=== added file 'README.obsolete.txt'
--- README.obsolete.txt	1970-01-01 00:00:00 +0000
+++ README.obsolete.txt	2013-09-19 17:47:31 +0000
@@ -0,0 +1,3 @@ 
+This package is obsolete.
+
+See http://git.linaro.org/gitweb?p=lava/lava-dispatcher.git instead

=== removed directory 'doc'
=== removed file 'doc/QUICKSTART'
--- doc/QUICKSTART	2013-06-12 14:20:52 +0000
+++ doc/QUICKSTART	1970-01-01 00:00:00 +0000
@@ -1,157 +0,0 @@ 
-Dispatcher Quick Start
-======================
-
-Dependencies
-------------
-
-LAVA needs python >= 2.6.  For the dispatcher, you need the pexpect
-module.
-
-
-Configuring for local development
----------------------------------
-
-Configuring conmux
-++++++++++++++++++
-
-You will need to have a card containing a 'master image' for your
-board.  The process of creating a master image is outlined on
-https://wiki.linaro.org/Platform/Validation/Specs/MasterBootImage.
-
-For LAVA development and testing using only locally attached resources,
-you should be able to make use of most features, even without the use of
-special equipment such as a console server.
-
-First install conmux and cu::
-
-    sudo add-apt-repository ppa:linaro-maintainers/tools
-    sudo apt-get update
-    sudo apt-get install conmux cu
-
-Connect a development board to a local serial device (e.g. ttyUSB0).
-You may have permission problem with cu running as root under conmux.
-
-Create a configuration file for your board under /etc/conmux which
-should look something like this::
-
-    listener panda01
-    application console 'panda01 console' 'cu -l /dev/ttyUSB0 -s 115200'
-
-Make sure to give the file a '.cf' extension (e.g. panda01.cf).
-
-If you see this permission problem when running cu, you can try
-adjusting your .cf file to call cu using sg, and the group name owning
-the device.  For example::
-
-    sg dialout "cu -l ttyUSB0 -s 115200"
-
-Finally restart conmux::
-
-    sudo stop conmux
-    sudo start conmux
-
-You can test the connection using::
-
-    conmux-console panda01
-    (use ~$quit to exit)
-
-You should be able to type commands and interact with the shell inside
-conmux-console.  If you cannot, run "sudo stop conmux" and try running
-'sg dialout "cu -l ttyUSB0 -s 115200"'.  If that doesn't work, you
-probably need to add some files to /etc/uucp.  Add ::
-
-    port ttyUSB0
-    type direct
-    device /dev/ttyUSB0
-    hardflow false
-    speed 115200
-
-to /etc/uucp/port and append ::
-
-    system  panda01
-    port    ttyUSB0
-    time    any
-
-to /etc/uucp/sys.  If this doesn't let you interact with the shell in
-conmux-console, complain in #linaro on freenode and hopefully someone
-will help you out :)
-
-
-Configuring the dispatcher
-++++++++++++++++++++++++++
-
-The dispatcher looks for config values in ~/.config/lava-dispatcher/, then
-/etc/lava-dispatcher (inside the current virtual environment if any, or
-/etc/lava-dispatcher itself otherwise), then its own source tree.
-
-To get started, you ned to make a directory for storing artefacts
-before they are downloaded to the device being tested
-(LAVA_IMAGE_TMPDIR in the config and /linaro/images/tmp by default)::
-
-    # mkdir -p /linaro/images/tmp
-
-You will need to set LAVA_SERVER_IP to the address of the machine
-running the dispatcher in ~/.config/lava-dispatcher/lava-dispatcher.conf::
-
-    $ cat ~/.config/lava-dispatcher/lava-dispatcher.conf
-    LAVA_SERVER_IP = 192.168.88.77
-
-You can set LAVA_PROXY if you have a proxy available in
-~/.config/lava-dispatcher/lava-dispatcher.conf, squid proxy service
-is preferred, if no, please leave it blank::
-
-    $ cat ~/.config/lava-dispatcher/lava-dispatcher.conf
-    LAVA_PROXY = http://192.168.88.77:3128/
-
-The dispatcher will use ``pip`` to install the lava-test package by default.
-You may want to use ``apt-get`` to install a ``.deb`` package instead, and
-will need to set the LAVA_TEST_DEB, with the name of the package::
-
-    $ cat ~/.config/lava-dispatcher/lava-dispatcher.conf
-    LAVA_TEST_DEB = lava-test
-
-You will need to add a configuration file for your device. It can be
-extremely simple, just identifying the type of the device::
-
-    $ cat ~/.config/lava-dispatcher/devices/panda01.conf
-    device_type = panda
-
-You also need a webserver such as Apache set up, and serve
-LAVA_IMAGE_TMPDIR/images as /images.  This snippet does this for
-Apache::
-
-    Alias /images/ "/linaro/images/"
-    <Directory "/linaro/images/">
-        Options Indexes MultiViews FollowSymLinks
-    </Directory>
-
-If you want to upload the results to a local dashboard instance, you
-need to set one of those up.  XXX link to doc on this!
-
-Modify the server in job file doc/examples/jobs/lava-ltp-job.json to local 
-lava-dashboard url, like:
-
-    {
-      "command": "submit_results",
-      "parameters":
-        {
-          "server": "http://staging.linaro.dev/lava-server/RPC2/",
-          "stream": "/anonymous/panda01-ltp/"
-        }
-    }
-
-
-A note on logging: Conmux keeps logs in /var/log/conmux.  If it cannot
-attach to the device, it may continually send output to the log every
-few seconds.  For non-production purposes, you may simply want to run
-'sudo stop conmux' when you are not using the console, to avoid
-needlessly growing large logfiles.
-
-
-Dispatching a job
------------------
-
-From the toplevel, run (as root):
-
-    ./lava-dispatch doc/examples/jobs/lava-ltp-job.json
-

=== removed file 'doc/arm_energy_probe.rst'
--- doc/arm_energy_probe.rst	2012-12-19 04:23:01 +0000
+++ doc/arm_energy_probe.rst	1970-01-01 00:00:00 +0000
@@ -1,102 +0,0 @@ 
-Using the ARM Energy Probe
-==========================
-
-The dispatcher includes a `signal handler`_ that allows tests in LAVA
-to include power measurement data per test case. Since the functionality
-is built into the dispatcher there are really two main things required to
-enable this.
-
- * deployment of a device with the AEP
- * creating jobs to use it
-
-.. _`signal handler`: external_measurement.html
-
-Deployment
-----------
-
-Hooking up probes to a specific board are beyond the scope of this document.
-However, once a board has a probe hooked up and plugged into the host PC,
-the dispatcher can be configured as follows::
-
-  # These options should be added to the device.conf ie:
-  # /srv/lava/instances/<INST>/etc/lava-dispatcher/devices/panda01.conf
-  # if the defaults are what's needed, then this can be skipped
-
-  # The location of the binary (default=/usr/local/bin/arm-probe)
-  arm_probe_binary = /home/doanac/linaro/arm-probe/arm-probe/arm-probe
-
-  # The location of the config file (default=/usr/local/etc/arm-probe-config)
-  arm_probe_config = /home/doanac/linaro/arm-probe/config
-
-  # The channels configured for this probe (can be an array default=VDD_VCORE1)
-  arm_probe_channels = VDD_VCORE1
-
-Since there may be a mix of device that have AEPs and different configs for
-the ones that do, its also recommended to use the LAVA admin interface for
-the lava-scheduler to define some tagging scheme that can be used to identify
-devices with certain AEP configs. This allows job files to then specify a
-tag if it needs AEP or some special AEP config.
-
-Creating a Job File
--------------------
-
-The job is pretty standard and can be read about our `jobfile`_ documentation.
-The specific thing needed for an AEP job would be the lava-test-shell action
-which would look something like::
-
-   {
-        "command": "lava_test_shell",
-        "parameters": {
-            "testdef_repos": [
-              {"bzr-repo": "lp:~doanac/+junk/arm-probe-demo",
-               "testdef": "arm-probe.yaml"
-              }
-            ],
-            "timeout": 1800
-        }
-    }
-
-.. _`jobfile`: jobfile.html
-
-Specifying the Test Definition
-------------------------------
-
-The test definintion should live in a bzr/git repository. The `above example's`_
-test definintion would look like::
-
-  metadata:
-      format: Lava-Test Test Definition 1.0
-      name: arm-probe-demo
-
-  handler:
-      handler-name: arm-probe
-      params:
-          # The post_process_script is run for each test case. Its called like:
-          # <script> <testcase_id> <aep stdout> <aep stderr> <aep channel_1>...
-          # This value can be either a relative path in the repo it lives in, or
-          # can be URL that will be downloaded
-          post_process_script: plot.sh
-          # probe_args allow you to add additional parameters when invoking the
-          # arm-probe binary
-          probe_args:
-            - -a
-            - '0.01'
-
-  install:
-      deps:
-          - cpuburn
-
-  run:
-      steps:
-          # These steps run on the target. lava-test-shell will call your script
-          # and ensure the host starts/stops the arm-probe for each test case
-          - 'lava-test-case aep-idle --shell ./aep-idle.sh'
-          - 'lava-test-case aep-burn --shell ./aep-burn.sh'
-
-
-Upon completion of the test run, the dispatcher will invoke the provided
-`postprocess_test_result`_ script so that it can generate things like graphs as it sees
-fit to compliment the data normally captured by LAVA.
-
-.. _`above example's`: http://bazaar.launchpad.net/~doanac/+junk/arm-probe-demo/files
-.. _`postprocess_test_result`: http://bazaar.launchpad.net/~doanac/+junk/arm-probe-demo/view/head:/plot.sh

=== removed file 'doc/changes.rst'
--- doc/changes.rst	2013-02-06 18:07:14 +0000
+++ doc/changes.rst	1970-01-01 00:00:00 +0000
@@ -1,561 +0,0 @@ 
-Version History
-***************
-
-.. _version_0_33:
-
-Version 0.33
-============
-* Unreleased
-* galaxy nexus fix for multiple device support
-* dispatcher config update for vexpress/android
-
-.. _version_0_32:
-
-Version 0.32
-============
-* galaxy nexus support
-* sdmux fixes
-* boot options fixes for master.py
-* support for killing --oob-fd hack in lava-scheduler
-
-.. _version_0_31_2:
-
-Version 0.31.2
-==============
-
-* Fixed bugs found executing tests under rtsm
-
-.. _version_0_31_1:
-
-Version 0.31.1
-==============
-
-* Fix typo in fastmodel.py
-
-.. _version_0_31:
-
-Version 0.31
-============
-
-* Use Launcher information from logcat to check for display of home screen.
-* Remove broken attempt to attach serial log to lava test run.
-
-.. _version_0_30:
-
-Version 0.30
-============
-* fillout log_lineno for lava-test-shell results
-* make fastmodel config files easier to manage
-* configglue warning fixes
-* keep old results directory around in lava-test-shell to help debug
-
-.. _version_0_29:
-
-Version 0.29
-============
-* fix to ARM Energy Probe post processing logic
-* enable networking in FastModel v8
-* add --target override parameter for "lava dispatch"
-* fix timeout bug in lava_test_shell action
-* foundational changes to get ready for Galaxy Nexus support
-* sdmux device support
-* partition file layout update for panda-android
-
-.. _version_0_28:
-
-Version 0.28
-============
-* lava-test-case should not return non-zero exit code for --shell false
-* Replace all usage of shutil.rmtree with a shell call to `rm -rf`
-* add support for ARM Energy Probe
-
-.. _version_0_27:
-
-Version 0.27
-============
-* bug fix: include lava-test-run-attach
-* improve serial console input delay
-
-.. _version_0_26:
-
-Version 0.26
-============
-* improve uinitrd logic for master.py and android
-* allow more options about what an android 'boot' means
-* sync on device-types that were actually in use in the lab
-
-.. _version_0_25:
-
-Version 0.25
-============
-* signal handlers can be implemented as shell scripts
-* various lava-test-shell bug fixes
-
-.. _version_0_24_1:
-
-Version 0.24.1
-==============
-* lava-test-shell related fixes
-
-.. _version_0_24:
-
-Version 0.24
-============
-* add a new "lava devices" command
-* fixed some configglue related warnings
-* some bug fixes for signals
-* improve android partition mount logic
-
-.. _version_0_23:
-
-Version 0.23
-============
-* signal support
-* fix pipe draining issue causing 100% cpu consumption with CTS
-* fix bug where ctrl-c causes exception
-* job priority support
-* YAML test def can be specified in a git/bzr repo
-
-.. _version_0_22:
-
-Version 0.22
-============
-* refactor fastmodel implementation to not require code changes for new products
-* simplify power_off/sync logic in targets
-* boot_options improvements
-* extract_tarball API added to target
-* change lava-test-shell defintion format to be YAML
-* allow test definitions to use a default parsing pattern
-
-.. _version_0_21:
-
-Version 0.21
-============
-* allow boot to master logic to retry a few times before exiting with error
-* move lava-test-shell test def format to use YAML
-* CTS fix
-* fix unicode issue in new usage of python's tarfile lp:1071279
-
-.. _version_0_20_1:
-
-Version 0.20.1
-==============
-* fixed prompt issue on Android that was causing timeouts
-
-.. _version_0_20:
-
-Version 0.20
-============
-* Support device version for qemu and rtsm.
-* Add dummy_deployment action.
-* Add mkdir -p /mnt/lava/boot to android deployment.
-
-.. _version_0_19_1:
-
-Version 0.19.1
-==============
-* fixed a packaging issue with lava_test_shell files
-
-.. _version_0_19:
-
-Version 0.19
-============
-* Change to using configglue to manage our configuration
-* transition to new "target" based API
-* add new "lava-test-shell" for black-box style test support
-* add v8 FoundationsModel support to fastmodel.py
-
-.. _version_0_18:
-
-Version 0.18
-============
-* fix issue with /etc/resolv.conf
-* removed unused/unsupported action attributes: pkg and kernel_matrix
-
-.. _version_0_17_2:
-
-Version 0.17.2
-==============
-* fixed sd card issue for Android Panda JellyBean
-
-.. _version_0_17.1:
-
-Version 0.17.1
-============
-* regression bug fix for ADB connections in FastModels
-* bug lp:1032467
-* don't leak LAVA access token into logfile
-
-.. _version_0_17:
-
-Version 0.17
-============
-* fixes for FastModel support
-* URL mapping feature
-* boot support for Open Embedded images
-
-.. _version_0_16:
-
-Version 0.16
-============
-* Fix #1028512, provide test image hostname custom option: tester_hostname.
-* Fix #1019630, possibility to set proxy error when sending serial port command.
-* Add support for Ubuntu images to FastModel client
-* Allow clients to handle custom boot options
-
-.. _version_0_15_2:
-
-Version 0.15.2
-==============
-* made consistent downloading and temp file creation logic to help prevent disk leakage
-
-.. _version_0_15_1:
-
-Version 0.15.1
-==============
-* fixed a bug causing cache leak and pre-built image test failure
-
-.. _version_0_15:
-
-Version 0.15
-============
-* support for /sdcard partition for Android
-* change vmalloc args for snowball
-* more cache logic cleanup
-* fastmodel client bug fixes
-* change over to use disablesuspend.sh script
-
-.. _version_0_14:
-
-Version 0.14
-============
-* FastModel support for Android
-* FastModel boot support for Ubuntu
-* QEMU device updates
-* Improved timeout handling
-
-.. _version_0_13:
-
-Version 0.13
-============
-
-* Add all repositories specified in the add_apt_repository command.
-* Increase the number of retries and decrease the wait time in
-  _deploy_tarball_to_board
-* Make sure all download code uses the configured proxy, and enable
-  custom cookies to be set when downloading.
-* Reboot after a lava-android-test times out.
-* Make lava-dispatch invoke lava dispatch, and make the latter's
-  logging setup match the formers
-* Fix lava_android_test_run.test_name to not error when an option is
-  passed to lava_android_test_run in the JSON.
-
-.. _version_0_12:
-
-Version 0.12
-============
-
-* Another attempt to detect a stuck port on an ACS6000.
-* Do not crash when wait_for_home_screen times out.
-
-.. _version_0_11:
-
-Version 0.11
-============
-
-* Watch for various messages from the connection_command that indicate
-  how successful the connection attempt has been, and do various
-  things in response.
-
-.. _version_0_10:
-
-Version 0.10
-============
-
-* Add support for a pre_connect_command that will be executed before
-  connection_command.
-* Add 'lava connect' and 'lava power-cycle' commands.
-
-.. _version_0_9:
-
-Version 0.9
-===========
-
-* Make retrying deployment if failed more robust.
-* Log a message when submit_results fails.
-
-Version 0.8
-===========
-
-* Fixed reboot issues
-* Skip raising exception on the home screen has not displayed for health check jobs
-* Retry deployment if failed.
-* Allow lava-test-install action to install extra debs.
-* Allow installing lava-test from a deb.
-* Support running tests with monkeyrunner.
-
-.. _version_0_7_1:
-
-Version 0.7.1
-=============
-
-* Increase the timeout around the shell commands to set up the proxy in the
-  test image.
-* Make the wget part of the wget|tar operations slightly more verbose.
-* Do not fetch the test images to the board through the proxy.
-
-.. _version_0_7:
-
-Version 0.7
-===========
-
-* Use squid proxy for caching mechanism
-* Run all lava-test install commands with a wrapper that catches errors.
-* Support tags in the job file.
-* Kill the process we're using to talk to the board on dispatcher exit.
-* Update the schema for add_apt_repository to match usage, making the action
-  usable again.
-
-.. _version_0_6:
-
-Version 0.6 (Milestone 12.04)
-=============================
-
-* Merge 0.5.12 bugfix release
-* Config options for interrupting boot process
-* Fix package dependency on python-keyring
-* Cache rootfs and boot tarballs
-
-.. _version_0_5_12:
-
-Version 0.5.12
-==============
-
-* Increase timeout for rootfs deployment to 5 hours (18000 seconds).
-  This should help in working with vexpress.
-
-.. _version_0_5_11:
-
-Version 0.5.11
-==============
-* Fixed boot android image problem caused by changing of init.rc file.
-* Make sure to look on device for bundles even if all test run steps fail.
-* Use the correct lmc_dev_arg for beagle-xm
-* Add qemu_drive_interface configuration option for the LAVA QEMU client.
-
-.. _version_0_5_10:
-
-Version 0.5.10
-==============
-* Omit the commands we send to the board from the log (as this output is
-  invariably echoed back and so was ending up in the output twice)
-
-* Convert the dispatcher to LAVA commnand. It can now be called from the shell
-  by running ``lava dispatch``. The old command line interface
-  ``lava-dispatch`` is now deprecated and will be removed in the 0.8 release in
-  three months.
-
-.. _version_0_5_9:
-
-Version 0.5.9
-=============
-* Make the validation of the job file that happens before a job starts
-  more rigorous.
-* Change snowball boot arg vmalloc=300M
-
-.. _version_0_5_8:
-
-Version 0.5.8
-=============
-* Changes for virtual express support:
-  * Add in a standard vexpress config for UEFI
-  * Make changes to allow for different boot interception message
-  configuration
-  * Increase timeouts for some stages of deployment (mkfs ext3) to
-  account for vexpress (lack of) speed.
-
-.. _version_0_5_7:
-
-Version 0.5.7
-=============
-
-* Allow a device's config to specify how to power cycle it.
-* Pass --force-yes to apt-get & call lava-test reset after installing it.
-* Increase wget connect timeout to see if we can work around a possible
-  issue where the server gets busy, and doesn't connect quickly enough
-  for getting the tarballs
-* Stop reading the long-obsolete 'image_type' field from the job json.
-* Add an field health_check in job schema to tell if the job is a health check
-  job.
-
-.. _version_0_5_6:
-
-Version 0.5.6
-=============
-
-* by default, a shell command run on the board that fails will now
-  fail the job.
-* combine submit_results and submit_results_on_host into one action,
-  although both action names are still supported.
-* allow deployment from a compressed image file
-* add support for optionally including a job id in the process name as
-  seen by top
-
-.. _version_0_5_5:
-
-Version 0.5.5
-=============
-* allow the job file to contain unknown propertiies
-
-.. _version_0_5_4:
-
-Version 0.5.4
-=============
-
-* allow deployment from an image file as well as a rootfs/hwpack combination
-* Auto accept the new snowball license update.
-
-.. _version_0_5_3:
-
-Version 0.5.3
-=============
-
-* Fix https://bugs.launchpad.net/lava-dispatcher/+bug/921527 - It is hard to
-  follow the lava-dispatcher logging when debug why the test job failed
-
-.. _version_0_5_2:
-
-Version 0.5.2
-=============
-
-* Fix https://launchpad.net/bugs/921632 - still submit some results even if
-  retrieve_results blows up
-* Fix https://launchpad.net/bugs/925396 - lava-dispatcher exits when test
-  failed
-* Minor documentation updates
-
-.. _version_0_5_1:
-
-Version 0.5.1
-=============
-
-* Fix broken rc check (Paul Larson)
-
-.. _version_0_5_0:
-
-Version 0.5.0
-=============
-
-* Add new android_install_binaries action
-* Fix problem when reporting failure messages that contain unicode
-* Refactor click-through workaround, and add support for new omap3
-  hwpacks
-* fix lava-test installation detection
-
-.. _version_0_4_5:
-
-Version 0.4.5
-=============
-* extend lmc timeout to 24 hours
-* retry until timeout for getting results
-* pass on timeout in PrefixCommandRunner.run
-
-.. _version_0_4_4:
-
-Version 0.4.4
-=============
-* Fix an issue with linaro-media-create timing out prematurely
-
-.. _version_0_4_3:
-
-Version 0.4.3
-=============
-* Workaround for license acceptance in lmc on snowball
-* Fix userdata deployment for origen and mx53
-* Fix missing piece for errno 17 on deployment (bug #897918)
-
-.. _version_0_4_2:
-
-Version 0.4.2 (Milestone 2012.01)
-=================================
-* Job files can now specify the filesystem to use for the rootfs.
-* It is now possible to include an auth token in the job file so that
-  results can be submitted to a private bundle stream.
-* Corrected errors with deploying Android 4.x
-* Snowball improvements and workaround for reboot issues on snowball
-* Better cleanup of temporary images if deployment fails
-* Bug fixes: #905457, #906772.
-
-.. _version_0_4_1:
-
-Version 0.4.1 (Milestone 11.12)
-===============================
-* Add support for Origen
-* Snowball default config fixes
-* Add support for new snowball hwpacks
-* Fix timeout usage in lava_test_install
-* Added logging for sending and expecting statements.
-* Bug fixes: #900990, #904544, #898525.
-
-.. _version_0_4:
-
-Version 0.4
-===========
-* Major refactoring of how commands are run on boards.
-* Set PS1 in a way that works on ice cream sandwich builds
-* Add --config-dir option.
-* Consistently fail if deployment fails.
-* Support for snowball V5 and later.
-
-.. _version_0_3_5:
-
-Version 0.3.5 (Milestone 11.11)
-===============================
-* Have soft_reboot look for a message that both android and regular images print
-* Update android demo job to download urls that will hopefully exist for a while
-* First pass at adding plugin support for lava actions
-* Add a --validate switch for using the dispatcher to validate the schema
-* Fix hang with add-apt-repository in oneiric
-* Add LAVA support for Android on MX53 QS board
-* Allow passing an option to the install step for lava-android-test
-* Increase timeout for waiting on the network to come up
-* Fix pypi installations issues
-* Add l-m-c version to metadata
-* Merge improvement for bug 874594 so the default timeout is shorten to 20mins
-* Fix demo job to install and run the same test
-* Remove old android tests and LavaAndroidClient
-* Move all the stuff that knows about conmux to a concrete subclass of a new connection abstract class
-
-.. _version_0_3_4:
-
-Version 0.3.4 (Milestone 11.10)
-===============================
-* Documentation for lava-dispatcher is now available from lava-dispatcher.readthedocs.org
-* Added support for snowball boards
-* Move bootloader prompt string to device_type configuration file
-* Bug fixes: #873043, #861115, #867858, #863091, #872948, #877045, #855384
-
-.. _version_0_3:
-
-Version 0.3 (Milestone 11.09)
-=============================
-* Local configuration data for lava-dispatcher is now stored in config files. (Please look at the README and examples of configuration)
-* A new kernel package can be specified for testing directly in the lava-dispatcher
-* The lava-dispatcher is now available as a package.
-* Bug fixes: #836700, #796618, #831784, #833246, #844462, #856247, #813919, #833181, #844299, #844301, #844446, #845720, #850983, #827727, #853657.
-
-.. _version_0_2:
-
-Version 0.2 (Milestone 11.08)
-=============================
-* Transferring results from the test system to the dispatcher is now more reliable
-* i.MX53 support added
-* Support added for installing out-of-tree tests
-* Bug fixes: #815986, #824622, #786005, #821385
-
-Version 0.1 (Milestone 11.07)
-=============================
-* LAVA dispatcher now tries to make as much progress in the test run as possible despite failures of previous actions, and keeps track of which actions passed or failed rather than just whether the whole test run completed or not.
-* Trial support for snowball board
-* Bug fixes: #791725, #806571, #768453
-

=== removed file 'doc/conf.py'
--- doc/conf.py	2013-08-19 10:36:46 +0000
+++ doc/conf.py	1970-01-01 00:00:00 +0000
@@ -1,228 +0,0 @@ 
-# -*- coding: utf-8 -*-
-#
-# LAVA Dispatcher documentation build configuration file, created by
-# sphinx-quickstart on Sat Sep 24 18:20:56 2011.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys
-import os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-sys.path.append(os.path.abspath('..'))
-
-# -- General configuration -----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.doctest',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.todo',
-    'sphinx.ext.coverage',
-    'sphinx.ext.viewcode']
-
-# Configuration for sphinx.ext.todo
-
-todo_include_todos = True
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = []
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'LAVA Dispatcher'
-copyright = u'2011, Linaro Validation Team'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.32'
-# The full version, including alpha/beta/rc tags.
-release = '0.32'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = []
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'LAVADispatcherDoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-    ('index', 'LAVADispatcher.tex', u'LAVA Dispatcher Documentation',
-     u'Linaro Validation Team', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'lavadispatcher', u'LAVA Dispatcher Documentation',
-     [u'Linaro Validation Team'], 1)
-]

=== removed symlink 'doc/configuration.rst'
=== target was u'../lava_dispatcher/default-config/lava-dispatcher/README'
=== removed file 'doc/cu-ttyUSB0.cf'
--- doc/cu-ttyUSB0.cf	2011-03-07 21:09:20 +0000
+++ doc/cu-ttyUSB0.cf	1970-01-01 00:00:00 +0000
@@ -1,2 +0,0 @@ 
-listener panda01
-application console 'panda01 console' 'sg dialout "cu -l ttyUSB0 -s 115200"'

=== removed file 'doc/debugging.rst'
--- doc/debugging.rst	2013-08-23 10:25:49 +0000
+++ doc/debugging.rst	1970-01-01 00:00:00 +0000
@@ -1,119 +0,0 @@ 
-.. _debugging:
-
-Debugging LAVA test definitions
-*******************************
-
-.. _singlenode:
-
-Convert Multi-Node jobs to single node
-======================================
-
-The scripts available in the :ref:`multinode_api` are not installed for
-test jobs which are not part of a MultiNode group, so the job will simply
-fail that test as a ``command not found``.
-
-Therefore, by reversing the :ref:`changes_to_json`, a MultiNode JSON file
-can be converted to singlenode.
-
-Other calls which may require communication with other devices may need
-to be removed from your YAML. This can be extended to retain a set of
-singlenode YAML files in which new wrapper scripts and new builds are
-tested.
-
-The Job Definition of one job within a MultiNode group may be a good
-starting point for creating a singlenode equivalent.
-
-.. _set_x:
-
-Always use set -x in wrapper scripts
-====================================
-
-By viewing the complete log, the complete processing of the wrapper script
-becomes obvious.
-
-::
-
- #!/bin/sh
- set -e
- set -x
-
-.. _shell_operators:
-
-Avoid using shell operators in YAML lines
-=========================================
-
-Pipes, redirects and nested sub shells will not work reliably when put
-directly into the YAML. Use a wrapper script (with :ref:`set -x <set_x>`).
-
-::
-
- #!/bin/sh
-
- set -e
- set -x
- ifconfig|grep "inet addr"|grep -v "127.0.0.1"|cut -d: -f2|cut -d' ' -f1
-
-Un-nested sub-shells do work::
-
- - lava-test-case multinode-send-network --shell lava-send network hostname=$(hostname) fqdn=$(hostname -f)
-
-.. _check_messageid:
-
-Check that your message ID labels are consistent
-================================================
-
-A :ref:`lava_wait` must be preceded by a :ref:`lava_send` from at least
-one other device in the group or the waiting device will :ref:`timeout <timeouts>`
-
-This can be a particular problem if you remove test definitions from the
-JSON or edit a YAML file without checking other uses of the same file.
-
-``#`` can be used as a comment in YAML but JSON does not support
-comments, so take care.
-
-.. _parsers:
-
-Test your result parsers
-========================
-
-If the YAML uses a custom result parser, configure one of your YAML files
-to output the entire test result output to stdout so that you can
-reliably capture a representative block of output. Test your proposed
-result parser against the block using your favourite language.
-
-Comment out the parser from the YAML if there are particular problems,
-just to see what the default LAVA parsers can provide.
-
-.. _paths:
-
-Be obsessive about paths and scripts
-====================================
-
-* If you use ``cd`` in your YAML, always store where you were and where you end up using ``pwd``. 
-* Output your location prior to calling local wrapper scripts.
-* Ensure that all wrapper scripts are executable in your VCS
-* Ensure that the relevant interpreter is installed. e.g. python is not necessarily part of the test image.
-* Consider installing ``realpath`` and use that to debug your directory structure.
-  * Avoid the temptation of using absolute paths - LAVA may need to change the absolute locations.
-
-.. _failed_tests:
-
-A failed test is not necessarily a bug in the test
-==================================================
-
-Always check whether the test result came back as failed due to some
-cause other than the test definition itself. Particularly with MultiNode,
-a test result can fail due to some problem on a different board within
-the group.
-
-.. _json_files:
-
-Check your JSON files
-=====================
-
-Syntax problems will be picked up by LAVA when you submit but also check
-that the URLs listed in the JSON are correct. Keep your YAML descriptions,
-names and filenames unique so that it is easier to pick up if the JSON
-simply calls the wrong YAML test definition.
-
-

=== removed directory 'doc/examples'
=== removed directory 'doc/examples/jobs'
=== removed file 'doc/examples/jobs/lava-android-test.json'
--- doc/examples/jobs/lava-android-test.json	2012-04-19 11:38:00 +0000
+++ doc/examples/jobs/lava-android-test.json	1970-01-01 00:00:00 +0000
@@ -1,83 +0,0 @@ 
-{
-  "job_name": "lava_android_test",
-  "target": "panda01",
-  "timeout": 18000,
-  "actions": [
-    {
-      "command": "deploy_linaro_android_image",
-      "parameters":
-        {
-          "boot": "http://snapshots.linaro.org/android/~linaro-android/panda-ics-gcc46-kwg-upstream-open/540/target/product/pandaboard/boot.tar.bz2",
-          "system": "http://snapshots.linaro.org/android/~linaro-android/panda-ics-gcc46-kwg-upstream-open/540/target/product/pandaboard/system.tar.bz2",
-          "data": "http://snapshots.linaro.org/android/~linaro-android/panda-ics-gcc46-kwg-upstream-open/540/target/product/pandaboard/userdata.tar.bz2"
-        },
-      "metadata":
-        {
-          "rootfs.type": "android",
-          "rootfs.build": "12"
-        }
-    },
-    {
-      "command": "boot_linaro_android_image"
-    },
-    {
-      "command": "lava_android_test_install",
-      "parameters":
-        {
-            "tests": ["monkey", "0xbench", "busybox"]
-        }
-    },
-    {
-      "command": "lava_android_test_run",
-      "parameters":
-        {
-          "test_name": "busybox"
-        }
-    },
-    {
-      "command": "lava_android_test_run",
-      "parameters":
-        {
-          "test_name": "0xbench"
-        }
-    },
-    {
-      "command": "lava_android_test_run_custom",
-      "parameters":
-        {
-          "command_file": "http://bazaar.launchpad.net/~linaro-validation/lava-android-test/trunk/download/head:/busybox_test.sh-20110927085925-2fxzf7wrrtq4gci0-3/busybox_test.sh",
-          "parser": "^\\s*(?P<test_case_id>\\w+)=(?P<result>\\w+)\\s*$"
-        }
-    },
-    {
-      "command": "lava_android_test_run_custom",
-      "parameters":
-        {
-          "commands": ["tjunittest"],
-          "parser": "^\\s*(?P<test_case_id>.+)\\s+\\.\\.\\.\\s+(?P<result>\\w+)\\.\\s+(?P<measurement>[\\d\\.]+)\\s+(?P<units>\\w+)\\s*$"
-        }
-    },
-    {
-      "command": "lava_android_test_run",
-      "parameters":
-        {
-          "test_name": "monkey"
-        }
-    },
-    {
-      "command": "lava_android_test_run_monkeyrunner",
-      "parameters":
-        {
-          "url": "git://android.git.linaro.org/test/linaro/android/system.git"
-        }
-    },
-    {
-      "command": "submit_results_on_host",
-      "parameters":
-        {
-          "server": "http://validation.linaro.org/lava-server/RPC2/",
-          "stream": "/anonymous/lava-android-leb-panda/"
-        }
-    }
-  ]
-}

=== removed file 'doc/examples/jobs/lava-ltp-job.json'
--- doc/examples/jobs/lava-ltp-job.json	2011-12-22 06:11:18 +0000
+++ doc/examples/jobs/lava-ltp-job.json	1970-01-01 00:00:00 +0000
@@ -1,40 +0,0 @@ 
-{
-  "job_name": "foo",
-  "target": "panda01",
-  "timeout": 18000,
-  "actions": [
-    {
-      "command": "deploy_linaro_image",
-      "parameters":
-        {
-          "rootfs": "http://snapshots.linaro.org/11.05-daily/linaro-developer/20110208/0/images/tar/linaro-n-developer-tar-20110208-0.tar.gz",
-          "hwpack": "http://snapshots.linaro.org/11.05-daily/linaro-hwpacks/panda/20110208/0/images/hwpack/hwpack_linaro-panda_20110208-0_armel_supported.tar.gz"
-        }
-    },
-    {
-      "command": "lava_test_install",
-      "parameters":
-        {
-            "tests": ["ltp"]
-        }
-    },
-    {
-      "command": "boot_linaro_image"
-    },
-    {
-      "command": "lava_test_run",
-      "parameters":
-        {
-          "test_name": "ltp"
-        }
-    },
-    {
-      "command": "submit_results",
-      "parameters":
-        {
-          "server": "http://validation.linaro.org/lava-server/RPC2/",
-          "stream": "/anonymous/panda01-ltp/"
-        }
-    }
-  ]
-}

=== removed file 'doc/examples/jobs/lava-out-of-tree-test-1.json'
--- doc/examples/jobs/lava-out-of-tree-test-1.json	2011-12-22 06:11:18 +0000
+++ doc/examples/jobs/lava-out-of-tree-test-1.json	1970-01-01 00:00:00 +0000
@@ -1,48 +0,0 @@ 
-{
-  "job_name": "foo",
-  "target": "panda01",
-  "timeout": 18000,
-  "actions": [
-    {
-      "command": "deploy_linaro_image",
-      "parameters":
-        {
-          "rootfs": "http://snapshots.linaro.org/11.05-daily/linaro-ubuntu-desktop/20110814/0/images/tar/linaro-n-ubuntu-desktop-tar-20110814-0.tar.gz",
-          "hwpack": "http://snapshots.linaro.org/11.05-daily/linaro-hwpacks/panda/20110814/0/images/hwpack/hwpack_linaro-panda_20110814-0_armel_supported.tar.gz"
-        }
-    },
-    {
-      "command": "add_apt_repository",
-      "parameters":
-        {
-            "arg": ["ppa:linaro-graphics-wg/ppa"]
-        }
-    },
-    {
-      "command": "lava_test_install",
-      "parameters":
-        {
-            "tests": ["glcompbench"],
-            "install_python": ["bzr+http://bazaar.launchpad.net/~linaro-graphics-wg/+junk/linaro-graphics-wg-tests/#egg=linaro-graphics-wg-tests"]
-        }
-    },
-    {
-      "command": "boot_linaro_image"
-    },
-    {
-      "command": "lava_test_run",
-      "parameters":
-        {
-          "test_name": "glcompbench"
-        }
-    },
-    {
-      "command": "submit_results",
-      "parameters":
-        {
-          "server": "http://validation.linaro.org/lava-server/RPC2/",
-          "stream": "/anonymous/panda01-tesing/"
-        }
-    }
-  ]
-}

=== removed file 'doc/examples/jobs/lava-out-of-tree-test-2.json'
--- doc/examples/jobs/lava-out-of-tree-test-2.json	2011-12-22 06:11:18 +0000
+++ doc/examples/jobs/lava-out-of-tree-test-2.json	1970-01-01 00:00:00 +0000
@@ -1,41 +0,0 @@ 
-{
-  "job_name": "foo",
-  "target": "panda01",
-  "timeout": 18000,
-  "actions": [
-    {
-      "command": "deploy_linaro_image",
-      "parameters":
-        {
-          "rootfs": "http://snapshots.linaro.org/11.05-daily/linaro-ubuntu-desktop/20110814/0/images/tar/linaro-n-ubuntu-desktop-tar-20110814-0.tar.gz",
-          "hwpack": "http://snapshots.linaro.org/11.05-daily/linaro-hwpacks/panda/20110814/0/images/hwpack/hwpack_linaro-panda_20110814-0_armel_supported.tar.gz"
-        }
-    },
-    {
-      "command": "lava_test_install",
-      "parameters":
-        {
-            "tests": ["linaro.pmwg"],
-            "register": ["http://bazaar.launchpad.net/~linaro-validation/lava-test/trunk/download/head:/powermanagementtests-20110628125042-dnw8hkhbce7pqi0y-2/power-management-tests.json"]
-        }
-    },
-    {
-      "command": "boot_linaro_image"
-    },
-    {
-      "command": "lava_test_run",
-      "parameters":
-        {
-          "test_name": "linaro.pmwg"
-        }
-    },
-    {
-      "command": "submit_results",
-      "parameters":
-        {
-          "server": "http://validation.linaro.org/lava-server/dashboard/xml-rpc/",
-          "stream": "/anonymous/panda01-testing/"
-        }
-    }
-  ]
-}

=== removed file 'doc/examples/jobs/test_with_testoptions.json'
--- doc/examples/jobs/test_with_testoptions.json	2011-12-22 06:11:18 +0000
+++ doc/examples/jobs/test_with_testoptions.json	1970-01-01 00:00:00 +0000
@@ -1,25 +0,0 @@ 
-{
-  "job_name": "foo",
-  "target": "panda01",
-  "timeout": 18000,
-  "actions": [
-    {
-      "command": "lava_test_install",
-      "parameters":
-        {
-            "tests": ["peacekeeper"]
-        }
-    },
-    {
-      "command": "boot_linaro_image"
-    },
-    {
-      "command": "lava_test_run",
-      "parameters":
-        {
-          "test_name": "peacekeeper",
-          "test_options": "firefox"
-        }
-    }
-  ]
-}

=== removed directory 'doc/examples/plugins'
=== removed directory 'doc/examples/plugins/demo-action-plugin'
=== removed file 'doc/examples/plugins/demo-action-plugin/README'
--- doc/examples/plugins/demo-action-plugin/README	2011-11-14 23:09:29 +0000
+++ doc/examples/plugins/demo-action-plugin/README	1970-01-01 00:00:00 +0000
@@ -1,4 +0,0 @@ 
-This is a simple example of how to write a plugin action for LAVA
-Dispatcher.  You will need to install both lava-dispatcher and
-demo-action-plugin for it to work.  The json file provided here will run
-the action and exit.

=== removed file 'doc/examples/plugins/demo-action-plugin/demo-action-plugin.json'
--- doc/examples/plugins/demo-action-plugin/demo-action-plugin.json	2011-11-14 23:09:29 +0000
+++ doc/examples/plugins/demo-action-plugin/demo-action-plugin.json	1970-01-01 00:00:00 +0000
@@ -1,10 +0,0 @@ 
-{
-  "job_name": "foo",
-  "target": "panda01",
-  "timeout": 18000,
-  "actions": [
-    {
-      "command": "foo"
-    }
-  ]
-}

=== removed directory 'doc/examples/plugins/demo-action-plugin/demo_action_plugin'
=== removed file 'doc/examples/plugins/demo-action-plugin/demo_action_plugin/__init__.py'
=== removed file 'doc/examples/plugins/demo-action-plugin/demo_action_plugin/foo.py'
--- doc/examples/plugins/demo-action-plugin/demo_action_plugin/foo.py	2013-07-18 14:01:21 +0000
+++ doc/examples/plugins/demo-action-plugin/demo_action_plugin/foo.py	1970-01-01 00:00:00 +0000
@@ -1,30 +0,0 @@ 
-#!/usr/bin/python
-
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from lava_dispatcher.actions import BaseAction
-
-
-class cmd_foo(BaseAction):
-
-    def run(self):
-        """ do something """
-        print("Hello from demo-action-plugin")

=== removed file 'doc/examples/plugins/demo-action-plugin/setup.py'
--- doc/examples/plugins/demo-action-plugin/setup.py	2013-07-18 14:01:21 +0000
+++ doc/examples/plugins/demo-action-plugin/setup.py	1970-01-01 00:00:00 +0000
@@ -1,19 +0,0 @@ 
-#!/usr/bin/env python
-
-from setuptools import setup
-
-setup(
-    name='demo-action-plugin',
-    version='0.0.1',
-    author='Paul Larson',
-    author_email='paul.larson@linaro.org',
-    url='',
-    description='LAVA Dispatcher plugin test',
-    packages=['demo_action_plugin'],
-    entry_points="""
-    [lava_dispatcher.actions]
-    foo = demo_action_plugin.foo:cmd_foo
-    """,
-    zip_safe=False,
-    include_package_data=True
-)

=== removed file 'doc/external_measurement.rst'
--- doc/external_measurement.rst	2013-07-18 14:01:21 +0000
+++ doc/external_measurement.rst	1970-01-01 00:00:00 +0000
@@ -1,215 +0,0 @@ 
-Hooks, Signals and External Measurement
-=======================================
-
-.. warning::
-   This is work in progress!  Expect changes in details until at least early 2013.
-
-It is sometimes the case that an interesting test cannot be run solely
-on the device being tested: additional data from somewhere else is
-required.  For example, a test of the sound subsystem may want to
-generate audio, play it, capture it on another system and then compare
-the generated and captured audio.  A `lava-test-shell`_ test can be
-written to send **signals** to indicate when a test case starts and
-finishes which can be handled by a **handler** specified by the test
-definition.
-
-.. _`lava-test-shell`: lava_test_shell.html
-
-Signals
--------
-
-A signal is a message from the system being tested ("device") to the
-system the dispatcher is running on ("host").  The messaging is
-synchronous and uni-directional: lava-test-shell on the device will
-wait for the signal to be processed and there is no way for the
-device to receive data from the host.
-
-Generally speaking, we expect a test author will only be interested in
-handling the "start test case" and "end test case" signals that are
-sent by ``lava-test-case --shell``.
-
-Handler
--------
-
-A handler is a Python class that subclasses:
-
-.. autoclass:: lava_dispatcher.signals.SignalHandler
-
-This class defines three methods that you almost certainly want to
-override:
-
- 1. ``start_testcase(self, test_case_id):``
-
-    Called when a testcase starts on the device.  The return value of
-    this method is passed to both ``end_testcase`` and
-    ``processes_test_run``.
-
-    The expected case is something like: starting a process that
-    captures some data from or about the device and returning a
-    dictionary that indicates the pid of that process and where its
-    output is going.
-
- 2. ``end_testcase(self, test_case_id, case_data):``
-
-    Called when a testcase ends on the device.  ``case_data`` is
-    whatever the corresponding ``start_testcase`` call returned.
-
-    The expected case here is that you will terminate the process that
-    was started by ``start_testcase``.
-
- 3. ``process_test_result(self, test_result, case_data):``
-
-    Here you are expected to add the data that was recorded during the
-    test run to the results.  You need to know about the bundle format
-    to do this.
-
-These methods are invoked with catch-all exception handlers around
-them so you don't have to be super careful in their implementation: it
-should not be possible to crash the whole dispatcher with a typo in
-one of them.
-
-There are other methods you might want to override in some situations
--- see the source for more.
-
-Here is a very simple complete handler::
-
-  import datetime
-  import time
-
-  from json_schema_validator.extensions import timedelta_extension
-
-  from lava_dispatcher.signals import SignalHandler
-
-  class AddDuration(SignalHandler):
-
-      def start_testcase(self, test_case_id):
-          return {
-              'starttime': time.time()
-              }
-
-      def end_testcase(self, test_case_id, data):
-          data['endtime'] = time.time()
-
-      def postprocess_test_result(self, test_result, data):
-          delta = datetime.timedelta(seconds=data['endtime'] - data['starttime'])
-          test_result['duration'] = timedelta_extension.to_json(delta)
-
-Specifying a handler
---------------------
-
-Handlers are named in the test definition, for example::
-
-  handler:
-    handler-name: add-duration
-
-The name is the name of an `entry point`_ from the
-``lava.signal_handlers`` "group".  The entry point must be provided by
-a package installed into the instance that the dispatcher is running
-from.
-
-.. _`entry point`: http://packages.python.org/distribute/pkg_resources.html#entry-points
-
-Providing handlers as shell scripts
------------------------------------
-
-Using the 'shell-hooks' handler that is distributed with the
-dispatcher it is possible to write handlers as scripts in the same VCS
-repository as the test definition itself.
-
-The simplest usage looks like this::
-
-  handler:
-    handler-name: shell-hooks
-    params:
-      handlers:
-        start_testcase: start-testcase.sh
-        end_testcase: end-testcase.sh
-        postprocess_test_result: postprocess-test-result.sh
-
-The scripts named in ``handlers`` are invoked with a test-case
-specific directory as the current working directory so they can store
-and access data in local paths.  The scripts named by
-``start_testcase`` and ``end_testcase`` are invoked with no arguments
-but ``postprocess_test_result`` is invoked with a single argument: a
-directory which contains the on-disk representation of the test result
-as produced on the device (this on-disk representation is not yet
-fully documented).  If a hook produces output, it will be attached to
-the test result.
-
-As many interesting hooks need to have information about the device
-being tested, there is a facility for putting values from the device
-config into the environment of the hooks.  For example, the following
-test definition sets the environment variable ``$DEVICE_TYPE`` to the
-value of the ``device_type`` key::
-
-  handler:
-    handler-name: shell-hooks
-    params:
-      device_config_vars:
-        DEVICE_TYPE: device_type
-      handlers:
-        ...
-
-For a slightly silly example of a shell hook, let's try to mark any
-test that takes more than 10 seconds (as viewed from the host) as
-failed, even if they report success on the device, and also attach
-some meta data about the device to each test result.
-
-The start hook (``start-hook.sh`` in the repository) just records the
-current unix timestamp in a file (we can just use the cwd as a scratch
-storage area)::
-
-  #!/bin/sh
-  date +%s > start-time
-
-The end hook (``end-hook.sh``) just records the end time::
-
-  #!/bin/sh
-  date +%s > end-time
-
-The postprocess hook (``post-process-result-hook.sh``) reads the times
-recorded by the above hooks, overwrites the result if necessary and
-creates an attachment containing the device type::
-
-  #!/bin/sh
-  start_time=`cat start-time`
-  end_time=`cat end-time`
-  if [ $((end_time - start_time)) -gt 10 ]; then
-      echo fail > $1/result
-  fi
-  echo $DEVICE_TYPE > $1/attachments/device-type.txt
-
-A test definition that glues this all together would be::
-
-  metadata:
-    format: Lava-Test Test Definition 1.0
-    name: shell-hook-example
-
-  run:
-    steps:
-      - lava-test-case pass-test --shell sleep 5
-      - lava-test-case fail-test --shell sleep 15
-
-  handler:
-    handler-name: shell-hooks
-    params:
-      device_config_vars:
-        DEVICE_TYPE: device_type
-      handlers:
-        start_testcase: start-hook.sh
-        end_testcase: end-hook.sh
-        postprocess_test_result: post-process-result-hook.sh
-
-A repository with all the above pieces is on Launchpad in the branch
-`lp:~linaro-validation/+junk/shell-hook-example`_ so an action for
-your job file might look like::
-
-    {
-        "command": "lava_test_shell",
-        "parameters": {
-            "testdef_repos": [{"bzr-repo": "lp:~linaro-validation/+junk/shell-hook-example"}],
-            "timeout": 1800
-         }
-    },
-
-.. _`lp:~linaro-validation/+junk/shell-hook-example`: http://bazaar.launchpad.net/~linaro-validation/+junk/shell-hook-example/files

=== removed file 'doc/index.rst'
--- doc/index.rst	2013-09-11 15:49:26 +0000
+++ doc/index.rst	1970-01-01 00:00:00 +0000
@@ -1,67 +0,0 @@ 
-.. LAVA Dispatcher documentation master file, created by sphinx-quickstart on
-   Fri Sep 23 10:15:12 2011.  You can adapt this file completely to your
-   liking, but it should at least contain the root `toctree` directive.
-
-LAVA Dispatcher Documentation
-*****************************
-
-LAVA Dispatcher is used to dispatch test jobs from server(master node) to the target
-boards in validation farm, and publish the test result back to dashboard. It is
-scheduled by validation scheduler, and it could also run as standalone.
-
-You can see an up-to-date list of supported target devices by looking at the
-`device types`_ in Launchpad.
-
-.. _device types: http://bazaar.launchpad.net/~linaro-validation/lava-dispatcher/trunk/files/head:/lava_dispatcher/default-config/lava-dispatcher/device-types
-
-Installation
-============
-
-The best way to install this is by doing a full deployment of LAVA. This is
-documented on our `main project page`_ or the Documentation link on any
-LAVA instance. However, you can also setup the dispatcher for 
-`stand-alone development and testing`_.
-
-.. _main project page: /static/docs/
-.. _stand-alone development and testing: standalonesetup.html
-
-Indices and tables
-==================
-
-.. toctree::
-   :maxdepth: 2
-
-   standalonesetup.rst
-   configuration.rst
-   jobfile.rst
-   usage.rst
-   lava_test_shell.rst
-   external_measurement.rst
-   arm_energy_probe.rst
-   sdmux.rst
-   multinode.rst
-   proxy.rst
-
-* :ref:`search`
-
-Source code, bugs and patches
-=============================
-
-The project is maintained on Launchpad at
-http://launchpad.net/lava-dispatcher/.
-
-We maintain an online log of `release notes`_
-
-.. _release notes: changes.html
-
-You can get the source code with bazaar using ``bzr branch
-lp:lava-dispatcher``.  Patches can be submitted using Launchpad merge proposals
-(for introduction to this and topic see
-https://help.launchpad.net/Code/Review).
-
-Please report all bugs at https://bugs.launchpad.net/lava-dispatcher/+filebug.
-
-Most of the team is usually available in ``#linaro`` on ``irc.freenode.net``.
-Feel free to drop by to chat and ask questions.
-
-

=== removed file 'doc/jobfile-android.rst'
--- doc/jobfile-android.rst	2012-10-10 19:29:04 +0000
+++ doc/jobfile-android.rst	1970-01-01 00:00:00 +0000
@@ -1,66 +0,0 @@ 
-Deploy Android
-==============
-
-Here's an example of a job file that will deploy and boot an Android image::
-
-    {
-      "job_name": "android_test",
-      "target": "panda01",
-      "timeout": 18000,
-      "actions": [
-        {
-          "command": "deploy_linaro_android_image",
-          "parameters":
-            {
-              "boot": "http://releases.linaro.org/12.09/android/leb-panda/boot.tar.bz2",
-              "system": "http://releases.linaro.org/12.09/android/leb-panda/system.tar.bz2",
-              "data": "http://releases.linaro.org/12.09/android/leb-panda/userdata.tar.bz2"
-            }
-        },
-        {
-          "command": "boot_linaro_android_image"
-        },
-        {
-          "command": "lava_android_test_install",
-          "parameters":
-            {
-                "tests": ["0xbench"]
-            }
-        },
-        {
-          "command": "lava_android_test_run",
-          "parameters":
-            {
-              "test_name": "0xbench"
-            }
-        },
-        {
-          "command": "submit_results_on_host",
-          "parameters":
-            {
-              "server": "http://validation.linaro.org/lava-server/RPC2/",
-              "stream": "/anonymous/lava-android-leb-panda/"
-            }
-        }
-      ]
-    }
-
-Installing Panda Binary Blobs
------------------------------
-
-Some Android builds for Panda require a binary blob to be installed. This can
-be done by adding the ``android_install_binaries`` after the
-``deploy_linaro_android_image``::
-
-        {
-          "command": "deploy_linaro_android_image",
-          "parameters":
-            {
-              "boot": "http://releases.linaro.org/12.09/android/leb-panda/boot.tar.bz2",
-              "system": "http://releases.linaro.org/12.09/android/leb-panda/system.tar.bz2",
-              "data": "http://releases.linaro.org/12.09/android/leb-panda/userdata.tar.bz2"
-            }
-        },
-        {
-          "command": "android_install_binaries"
-        }

=== removed file 'doc/jobfile-lmc.rst'
--- doc/jobfile-lmc.rst	2012-10-10 19:29:04 +0000
+++ doc/jobfile-lmc.rst	1970-01-01 00:00:00 +0000
@@ -1,33 +0,0 @@ 
-Deploy Using linaro-media-create
-================================
-
-Here's an example of a job file that will deploy an image on a target based on
-a hardware pack and root filesystem from Linaro::
-
-    {
-      "job_name": "panda-lmc",
-      "target": "panda01",
-      "timeout": 18000,
-      "actions": [
-        {
-          "command": "deploy_linaro_image",
-          "parameters":
-            {
-              "rootfs": "http://releases.linaro.org/12.09/ubuntu/precise-images/nano/linaro-precise-nano-20120923-417.tar.gz",
-              "hwpack": "http://releases.linaro.org/12.09/ubuntu/leb-panda/hwpack_linaro-lt-panda-x11-base_20120924-329_armhf_supported.tar.gz"
-            }
-        },
-        {
-          "command": "boot_linaro_image"
-        },
-        {
-          "command": "submit_results",
-          "parameters":
-            {
-              "server": "http://localhost/lava-server/RPC2/",
-              "stream": "/anonymous/test/"
-            }
-        }
-      ]
-    }
-

=== removed file 'doc/jobfile-prebuilt.rst'
--- doc/jobfile-prebuilt.rst	2012-10-10 19:29:04 +0000
+++ doc/jobfile-prebuilt.rst	1970-01-01 00:00:00 +0000
@@ -1,31 +0,0 @@ 
-Deploy Pre-Built Image
-======================
-
-Here's a minimal job that will deploy and boot a pre-built image::
-
-    {
-      "job_name": "panda-prebuilt",
-      "target": "panda01",
-      "timeout": 18000,
-      "actions": [
-        {
-          "command": "deploy_linaro_image",
-          "parameters":
-            {
-              "image": "http://releases.linaro.org/12.09/ubuntu/leb-panda/lt-panda-x11-base-precise_ubuntu-desktop_20120924-329.img.gz"
-            }
-        },
-        {
-          "command": "boot_linaro_image"
-        },
-        {
-          "command": "submit_results",
-          "parameters":
-            {
-              "server": "http://localhost/lava-server/RPC2/",
-              "stream": "/anonymous/test/"
-            }
-        }
-      ]
-    }
-

=== removed file 'doc/jobfile.rst'
--- doc/jobfile.rst	2013-02-08 18:10:30 +0000
+++ doc/jobfile.rst	1970-01-01 00:00:00 +0000
@@ -1,155 +0,0 @@ 
-.. _jobfile:
-
-Writing a Dispatcher Job File
-*****************************
-There are dozens of permutations for creating job files in the dispatcher.
-This page goes through some common scenarios:
-
-The base skeleton job files should look like:
-
- * `Deploy a Pre-Built Image <jobfile-prebuilt.html>`_
- * `Deploy Using linaro-media-create <jobfile-lmc.html>`_
- * `Deploy an Android Image <jobfile-android.html>`_
-
-**NOTE:** Each of the above jobs uses the ``target`` parameter to specify the
-exact target to run the job on. If submitting a job via the scheduler, you'll
-likely want to just choose the ``device_type`` and let the scheduler find an
-idle device for you. This is done by removing the target line and adding::
-
-        "device_type": "panda",
-
-Executing Tests on Ubuntu
-=========================
-
-Tests are executed on Ubuntu by adding a ``lava_test_install`` and
-``lava_test_run`` action to your base job file::
-
-    {
-        "command": "lava_test_install",
-        "parameters": {
-            "tests": ["stream"]
-        }
-    },
-    {
-        "command": "boot_linaro_image"
-    },
-    {
-        "command": "lava_test_run",
-        "parameters": {
-            "test_name": "stream"
-        }
-    },
-
-**NOTE:** The ``lava_test_install`` action should follow the
-``deploy_linaro_image`` action.
-
-Executing Tests on Android
-==========================
-
-Tests are executed on Android  by adding a ``lava_android_test_install`` and
-``lava_android_test_run`` action to your base job file::
-
-    {
-        "command": "lava_android_test_install",
-        "parameters": {
-            "tests": ["busybox"]
-        }
-    },
-    {
-        "command": "boot_linaro_android_image"
-    },
-    {
-        "command": "lava_android_test_run",
-        "parameters": {
-            "test_name": "busybox"
-        }
-    },
-
-Using LAVA Test Shell
-=====================
-The ``lava_test_shell`` action provides a way to employ a more black-box style
-testing approach with the target device. The action only requires that a
-deploy action (deploy_linaro_image/deploy_linaro_android_image) has been
-executed. Its format is::
-
-    {
-        "command": "lava_test_shell",
-        "parameters": {
-            "testdef_urls": [
-                "http://people.linaro.org/~doanac/lava/lava_test_shell/testdefs/lava-test.yaml"
-            ],
-            "timeout": 1800
-        }
-    }
-
-You can put multiple test definition URLs in "testdef_urls"
-section. The "testdef_urls" section takes a list of strings which are
-URLs. These will be run sequentially without reboot. Alternatively,
-you can specify each URL in a separate ``lava_test_shell`` action
-which will allow for a reboot between each test.
-
-If your test definitions are available in a git repository then
-``lava_test_shell`` can automatically pull the test definition from
-the git repository and execute it. The format is::
-
-    {
-      "command": "lava_test_shell",
-      "parameters": {
-          "testdef_repos": [
-              {"git-repo": "git://git.linaro.org/people/stylesen/sampletestdefs.git",
-               "revision": "91df22796f904677c0fe5df787fc04234bf97691",
-               "testdef": "testdef.yaml"
-              }],
-      "timeout": 1800
-      }
-    }
-
-Alternatively, if your test definitions are available in a bzr repository then
-``lava_test_shell`` can automatically pull the test definition from
-the bzr repository and execute it. The format is::
-
-    {
-      "command": "lava_test_shell",
-      "parameters": {
-          "testdef_repos": [
-              {"bzr-repo": "lp:~stylesen/lava-dispatcher/sampletestdefs-bzr",
-               "revision": "1",
-               "testdef": "testdef.yaml"
-              }],
-      "timeout": 1800
-      }
-    },
-
-In both the above formats "revision" and "testdef" are optional. If
-"revision" is not specified then the latest revision in the repository is
-cloned. If there is no "testdef" specified, then inside the cloned
-directory of the repository a file with name "lavatest.yaml" is looked
-up which is the default name for test definitions. The "testdef"
-parameter could be used in order to override the default name for test
-definition file.
-
-.. seealso:: The test definition format for ``lava_test_shell``
-             actions `here <lava_test_shell.html>`_
-
-             Developer documentation for ``lava_test_shell`` is
-             available `here <http://bazaar.launchpad.net/~linaro-validation/lava-dispatcher/trunk/view/head:/lava_dispatcher/actions/lava_test_shell.py#L23>`_
-
-Adding Meta-Data
-================
-
-Both deploy actions support an optional field, ``metadata``. The value of this
-option is a set of key-value pairs like::
-
-    {
-        "command": "deploy_linaro_image",
-        "parameters": {
-            "image": "http://releases.linaro.org/12.09/ubuntu/leb-panda/lt-panda-x11-base-precise_ubuntu-desktop_20120924-329.img.gz",
-            "metadata": {
-                "ubuntu.image_type": "ubuntu-desktop",
-                "ubuntu.build": "61"
-            }
-        }
-    }
-
-This data will be uploaded into the LAVA dashboard when the results are
-submitted and can then be used as filter criteria for finding data.

=== removed file 'doc/lava_test_shell.rst'
--- doc/lava_test_shell.rst	2013-04-12 10:43:56 +0000
+++ doc/lava_test_shell.rst	1970-01-01 00:00:00 +0000
@@ -1,232 +0,0 @@ 
-LAVA Test Shell
-***************
-
-The ``lava_test_shell`` action provides a way to employ a more black-box style
-testing approach with the target device. The test definition format is quite
-flexible and allows for some interesting things.
-
-Quick start
-===========
-
-A minimal test definition looks like this::
-
-  metadata:
-    name: passfail
-    format: "Lava-Test-Shell Test Definition 1.0"
-    description: "A simple passfail test for demo."
-    os:
-      - ubuntu
-      - openembedded
-    devices:
-      - origen
-      - panda
-    environment:
-      - lava-test-shell
-
-  run:
-      steps:
-          - echo "test-1: pass"
-          - echo "test-2: fail"
-
-  parse:
-      pattern: "(?P<test_case_id>.*-*):\\s+(?P<result>(pass|fail))"
-
-**NOTE:** The parse pattern has similar quoting rules as Python, so
-\\s must be escaped as \\\\s and similar.
-
-However, the parameters such as os, devices, environment are optional in
-the metadata section. On the other hand parameters such as name, format,
-description are mandatory in the metadata section.
-
-If your test definition is not part of a bzr or git repository then it
-is mandatory to have a 'version' parameter in metadata section. The
-following example shows how a test definition metadata section will
-look like for a test definition which is not part of bzr or git
-repository::
-
-  metadata:
-    name: passfail
-    format: "Lava-Test-Shell Test Definition 1.0"
-    version: "1.0"
-    description: "A simple passfail test for demo."
-    os:
-      - ubuntu
-      - openembedded
-    devices:
-      - origen
-      - panda
-    environment:
-      - lava-test-shell
-
-**NOTE:** Only if the test definition is referred from a URL the
-version parameter should be explicit.
-
-A lava-test-shell is run by:
-
 * "compiling" the above test definition into a shell script
- * copying this script onto the device and arranging for it to be run
-   when the device boots
- * booting the device and letting the test run
- * retrieving the output from the device and turning it into a test
-   result bundle
-
-Writing a test for lava-test-shell
-==================================
-
-For the majority of cases, the above approach is the easiest thing to
-do: write shell code that outputs "test-case-id: result" for each test
-case you are interested in.  This is similar to how the lava-test
-parsing works, so until we get around to writing documentation here,
-see
-http://lava-test.readthedocs.org/en/latest/usage.html#adding-results-parsing.
-
-The advantage of the parsing approach is that it means your test is
-easy to work on independently from LAVA: simply write a script that
-produces the right sort of output, and then provide a very small
-amount of glue to wire it up in LAVA.  However, when you need it,
-there is also a more powerful, LAVA-specific, way of writing tests.
-When a test runs, ``$PATH`` is arranged so that some LAVA-specific
-utilities are available:
-
- * ``lava-test-case``
- * ``lava-test-case-attach``
- * ``lava-test-run-attach``
-
-You need to use ``lava-test-case`` (specifically, ``lava-test-case
---shell``) when you are working with `hooks, signals and external
-measurement`_.
-
-.. _`hooks, signals and external measurement`: external_measurement.html
-
-lava-test-case
---------------
-
-lava-test-case records the results of a single test case.  For example::
-
-  steps:
-    - "lava-test-case simpletestcase --result pass"
-
-It has two forms.  One takes arguments to describe the outcome of the
-test case and the other takes the shell command to run -- the exit
-code of this shell command is used to produce the test result.
-
-Both forms take the name of the testcase as the first argument.
-
-The first form takes these additional arguments:
-
- * ``--result $RESULT``: $RESULT should be one of pass/fail/skip/unknown
- * ``--measurement $MEASUREMENT``: A numerical measurement associated with the test result
- * ``--units $UNITS``: The units of $MEASUREMENT
-
-``--result`` must always be specified.  For example::
-
-  run:
-    steps:
-      - "lava-test-case bottle-count --result pass --measurement 99 --units bottles"
-
-The second form is indicated by the --shell argument, for example::
-
-  run:
-    steps:
-      - "lava-test-case fail-test --shell false"
-      - "lava-test-case pass-test --shell true"
-
-The --shell form also sends the start test case and end test case
-signals that are described in `hooks, signals and external
-measurement`_.
-
-lava-test-case-attach
----------------------
-
-This attaches a file to a test result with a particular ID, for example::
-
-  steps:
-    - "echo content > file.txt"
-    - "lava-test-case test-attach --result pass"
-    - "lava-test-case-attach test-attach file.txt text/plain"
-
-The arguments are:
-
- 1. test case id
- 2. the file to attach
- 3. (optional) the MIME type of the file (if no MIME type is passed, a
-    guess is made based on the filename)
-
-lava-test-run-attach
---------------------
-
-This attaches a file to the overall test run that lava-test-shell is
-currently executing, for example::
-
-  steps:
-    - "echo content > file.txt"
-    - "lava-test-run-attach file.txt text/plain"
-
-The arguments are:
-
- 1. the file to attach
- 2. (optional) the MIME type of the file (if no MIME type is passed, a
-    guess is made based on the filename)
-
-
-Handling Dependencies (Ubuntu)
-==============================
-
-If your test requires some packages to be installed before its run it can
-express that in the ``install`` section with::
-
-  install:
-      deps:
-          - linux-libc-dev
-          - build-essential
-
-Adding Git/BZR Repositories
-===========================
-
-If your test needs code from a shared repository, the action can clone this
-data on your behalf with::
-
-  install:
-      bzr-repos:
-          - lp:lava-test
-      git-repos:
-          - git://git.linaro.org/people/davelong/lt_ti_lava.git
-
-  run:
-      steps:
-          - cd lt_ti_lava
-          - echo "now in the git cloned directory"
-
-This repository information will also be added to resulting bundle's software
-context when the results are submitted to the LAVA dashboard.
-
-Install Steps
-=============
-
-Before the test shell code is executed, it will optionally do some install
-work if needed. For example if you needed to build some code from a git repo
-you could do::
-
-  install:
-      git-repos:
-          - git://git.linaro.org/people/davelong/lt_ti_lava.git
-
-      steps:
-          - cd lt_ti_lava
-          - make
-
-**NOTE:** The repo steps are done in the dispatcher itself. The install steps
-are run directly on the target.
-
-Advanced Parsing
-================
-
-You may need to incorporate an existing test that doesn't output results
-in the pass/fail/skip/unknown format required by LAVA. The parse
-section has a fixup mechanism that can help::
-
-  parse:
-      pattern: "(?P<test_case_id>.*-*)\\s+:\\s+(?P<result>(PASS|FAIL))"
-      fixupdict:
-          PASS: pass
-          FAIL: fail

=== removed file 'doc/multinode-usecases.rst'
--- doc/multinode-usecases.rst	2013-08-19 10:36:07 +0000
+++ doc/multinode-usecases.rst	1970-01-01 00:00:00 +0000
@@ -1,8 +0,0 @@ 
-MultiNode Use Cases
-###################
-
-.. toctree::
-   :maxdepth: 3
-
-   usecaseone.rst
-   usecasetwo.rst

=== removed file 'doc/multinode.rst'
--- doc/multinode.rst	2013-08-23 10:25:00 +0000
+++ doc/multinode.rst	1970-01-01 00:00:00 +0000
@@ -1,291 +0,0 @@ 
-Multi-Node LAVA
-###############
-
-LAVA multi-node support allows users to use LAVA to schedule, synchronise and
-combine the results from tests that span multiple targets. Jobs can be arranged
-as groups of devices (of any type) and devices within a group can operate
-independently or use the MultiNode API to communicate with other devices in the
-same group during tests.
-
-Within a MultiNode group, devices are assigned a role and a ``count`` of devices to
-include into that role. Each role has a ``device_type`` and any number of roles can
-have the same ``device_type``. Each role can be assigned ``tags``.
-
-Once roles are defined, actions (including test images and test definitions) can be marked
-as applying to specific roles (if no role is specified, all roles use the action).
-
-If insufficient boards exist to meet the combined requirements of all the roles specified
-in the job, the job will be rejected.
-
-If there are not enough idle boards of the relevant types to meet the combined requirements
-of all the roles specified in the job, the job waits in the Submitted queue until all
-devices can be allocated.
-
-Once each board has booted the test image, the MultiNode API will be available for use within
-the test definition in the default PATH.
-
-.. toctree::
-   :maxdepth: 3
-
-   multinodeapi.rst
-   multinode-usecases.rst
-   debugging.rst
-
-Hardware requirements and virtualisation
-****************************************
-
-Multi-Node is explicitly about synchronising test operations across multiple boards and running
-Multi-Node jobs on a particular instance will have implications for the workload of that instance.
-This can become a particular problem if the instance is running on virtualised hardware with
-shared I/O, a limited amount of RAM or a limited number of available cores.
-
-e.g. Downloading, preparing and deploying test images can result in a lot of synchronous I/O and
-if this instance is running the server and the dispatcher, this can cause the load on that machine
-to rise significantly, possibly causing the server to become unresponsive.
-
-It is strongly recommended that Multi-Node instances use a separate dispatcher running on
-non-virtualised hardware so that the (possibly virtualised) server can continue to operate.
-
-Also, consider the number of boards connected to any one dispatcher. MultiNode jobs will commonly
-compress and decompress several test image files of several hundred megabytes at precisely the same
-time. Even with a powerful multi-core machine, this has been shown to cause appreciable load. It
-is worth considering matching the number of boards to the number of cores for parallel decompression
-and matching the amount of available RAM to the number and size of test images which are likely to
-be in use.
-
-Extending existing LAVA submissions
-***********************************
-
-To extend an existing JSON file to start a MultiNode job, some changes are required to define the
-``device_group``. If all devices in the group are to use the same actions, simply create a single
-role with a count for how many devices are necessary. Usually, a MultiNode job will need to assign
-different test definitions to different boards and this is done by adding more roles, splitting the
-number of devices between the differing roles and assigning different test definitions to each role.
-
-If a MultiNode job includes devices of more than one ``device_type``, there needs to be a role for
-each different ``device_type`` so that an appropriate image can be deployed.
-
-Where all roles share the same action (e.g. ``submit_results_on_host``), omit the role parameter from
-that action.
-
-If more than one, but not all, roles share one particular action, that action will need to be repeated
-within the JSON file, once for each role using that action.
-
-.. _changes_to_json:
-
-Changes to submission JSON
-==========================
-
-1. ``device`` or ``device_type`` move into a **device_group** list
-2. Each device type has a ``count`` assigned
-  1. If a ``device`` is specified directly, count needs to be one.
-  2. If ``device_type`` is used and count is larger than one, enough
-     devices will be allocated to match the count and all such devices will
-     have the same role and use the same commands and the same actions.
-3. Add tags, if required, to each role.
-4. If specific actions should only be used for particular roles, add a
-   role field to the parameters of the action.
-5. If any action has no role specified, it will be actioned for all roles.
-
-Example JSON::
-
- {
-    "timeout": 18000,
-    "job_name": "simple multinode job",
-    "logging_level": "INFO",
-    "device_group": [
-        {
-            "role": "omap4",
-            "count": 2,
-            "device_type": "panda",
-            "tags": [
-                "mytag1"
-            ]
-        },
-        {
-            "role": "omap3",
-            "count": 1,
-            "device_type": "beaglexm",
-            "tags": [
-                "mytag2"
-            ]
-        }
-    ],
-
-Using actions for particular roles
-==================================
-
-Example JSON::
-
-    "actions": [
-        {
-            "command": "deploy_linaro_image",
-            "parameters": {
-                "image": "file:///home/instance-manager/images/panda-raring_developer_20130529-347.img.gz",
-                "role": "omap4"
-            }
-        },
-        {
-            "command": "deploy_linaro_image",
-            "parameters": {
-                "image": "file:///home/instance-manager/images/beagle-ubuntu-desktop.img.gz",
-                "role": "omap3"
-            }
-        },
-        {
-            "command": "lava_test_shell",
-            "parameters": {
-                "testdef_repos": [
-                    {
-                        "git-repo": "git://git.linaro.org/qa/test-definitions.git",
-                        "testdef": "ubuntu/smoke-tests-basic.yaml"
-                    }
-                ],
-                "timeout": 1800
-            }
-        }
-    }
-
-..
-
-.. note:: Consider using http://jsonlint.com to check your JSON before submission.
-
-
-LAVA Multi-Node timeout behaviour
-*********************************
-
-The submitted JSON includes a timeout value - in single node LAVA, this is applied to each individual action
-executed on the device under test (not for the entire job as a whole). i.e. the default timeout can be smaller
-than any one individual timeout used in the JSON or internally within LAVA.
-
-In Multi-Node LAVA, this timeout is also applied to individual polling operations, so an individual lava-sync
-or a lava-wait will fail on any node which waits longer than the default timeout. The node will receive a failure
-response.
-
-.. _timeouts:
-
-Recommendations on timeouts
-===========================
-
-MultiNode operations have implications for the timeout values used in JSON submissions. If one of the
-synchronisation primitives times out, the sync will fail and the job itself will then time out.
-One reason for a MultiNode job to timeout is if one or more boards in the group failed to boot the
-test image correctly. In this situation, all the other boards will continue until the first
-synchronisation call is made in the test definition for that board.
-
-The time limit applied to a synchronisation primitive starts when the board makes the first request
-to the Coordinator for that sync. Slower boards may well only get to that point in the test definition
-after faster devices (especially KVM devices) have started their part of the sync and timed out
-themselves.
-
-Always review the top level timeout in the JSON submission - a value of 900 seconds (15 minutes) has
-been common during testing. Excessive timeouts would prevent other jobs from using boards where the
-waiting jobs have already failed due to a problem elsewhere in the group. If timeouts are too short,
-jobs will fail unnecessarily.
-
-Balancing timeouts
-^^^^^^^^^^^^^^^^^^
-
-Individual actions and commands can have differing timeouts, so avoid the temptation to change the
-default timeout when a particular action times out in a Multi-Node job. If a particular ``lava-test-shell``
-takes a long time, set an explicit timeout for that particular action:
-
-::
-
- {
-    "timeout": 900,
-    "job_name": "netperf multinode tests",
-    "logging_level": "DEBUG",
- }
-
-
-::
-
-        {
-            "command": "lava_test_shell",
-            "parameters": {
-                "testdef_repos": [
-                    {
-                        "git-repo": "git://git.linaro.org/people/guoqing.zhu/netperf-multinode.git",
-                        "testdef": "netperf-multinode-c-network.yaml"
-                    }
-                ],
-                "timeout": 2400,
-                "role": "client"
-            }
-        },
-        {
-            "command": "lava_test_shell",
-            "parameters": {
-                "testdef_repos": [
-                    {
-                        "git-repo": "git://git.linaro.org/people/guoqing.zhu/netperf-multinode.git",
-                        "testdef": "netperf-multinode-s-network.yaml"
-                    }
-                ],
-                "timeout": 1800,
-                "role": "server"
-            }
-        },
-
-
-Running a server on the device-under-test
-*****************************************
-
-If this server process runs as a daemon, the test definition will need to define something for the device
-under test to actually do or it will simply get to the end of the tests and reboot. For example, if the
-number of operations is known, one approach would be to batch up commands to the daemon, each batch being a test case.
-If the server program can run without being daemonised, it would need to be possible to close it down
-at the end of the test (normally this is the role of the sysadmin in charge of the server box itself).
-
-Making use of third party servers
-=================================
-
-A common part of a MultiNode setup is to download components from third party servers but once the test
-starts, latency and connectivity issues could interfere with the tests.
-
-Using wrapper scripts
-=====================
-
-Wrapper scripts make it easier to test your definitions before submitting to LAVA.
-The wrapper lives in a VCS repository which is specified as one of the testdef_repos and will be
-available in the same directory structure as the original repository. A wrapper script also
-helps the tests to fail early instead of trying to do the rest of the tests.
-
-MultiNode Result Bundles
-************************
-
-Results are generated by each device in the group. At submission time, one device in the group is
-selected to run the job which gets the aggregated result bundle for the entire group.
-
-LAVA Coordinator setup
-**********************
-
-Multi-Node LAVA requires a LAVA Coordinator which manages the messaging within a group of nodes involved in
-a Multi-Node job set according to this API. The LAVA Coordinator is a singleton to which nodes need to connect
-over a TCP port (default: 3079). A single LAVA Coordinator can manage groups from multiple instances.
-If the network configuration uses a firewall, ensure that this port is open for connections from Multi-Node dispatchers.
-
-If multiple coordinators are necessary on a single machine (e.g. to test different versions of the coordinator
-during development), each coordinator needs to be configured for a different port.
-
-If the dispatcher is installed on the same machine as the coordinator, the dispatcher can use the packaged
-configuration file with the default hostname of ``localhost``.
-
-Each dispatcher then needs a copy of the LAVA Coordinator configuration file, modified to point back to the
-hostname of the coordinator:
-
-Example JSON, modified for a coordinator on a machine with a fully qualified domain name::
-
-  {
-    "port": 3079,
-    "blocksize": 4096,
-    "poll_delay": 3,
-    "coordinator_hostname": "control.lab.org"
-  }
-
-An IP address can be specified instead, if appropriate.
-
-Each dispatcher needs to use the same port number and blocksize as is configured for the Coordinator
-on the specified machine. The poll_delay is the number of seconds each node will wait before polling
-the coordinator again.

=== removed file 'doc/multinodeapi.rst'
--- doc/multinodeapi.rst	2013-08-23 10:24:36 +0000
+++ doc/multinodeapi.rst	1970-01-01 00:00:00 +0000
@@ -1,302 +0,0 @@ 
-.. _multinode_api:
-
-MultiNode API
-=============
-
-The LAVA MultiNode API provides a simple way to pass messages using the serial port connection which
-is already available through LAVA. The API is not intended for transfers of large amounts of data. Test
-definitions which need to transfer files, long messages or other large amounts of data need to set up their
-own network configuration, access and download methods and do the transfer in the test definition.
-
-.. _lava_self:
-
-lava-self
----------
-
-Prints the name of the current device.
-
-Usage: ``lava-self``
-
-.. _lava_role:
-
-lava-role
----------
-
-Prints the role the current device is playing in a multi-node job.
-
-Usage: ``lava-role``
-
-*Example.* In a directory with several scripts, one for each role
-involved in the test::
-
-    $ ./run-$(lava-role)
-
-.. _lava-group:
-
-lava-group
-----------
-
-This command will produce in its standard output a representation of the
-device group that is participating in the multi-node test job.
-
-Usage: ``lava-group``
-
-The output format contains one line per device, and each line contains
-the hostname and the role that device is playing in the test, separated
-by a TAB character::
-
-    panda01     client
-    highbank01  loadbalancer
-    highbank02  backend
-    highbank03  backend
-
-.. _lava_send:
-
-lava-send
----------
-
-Sends a message to the group, optionally passing associated key-value
-data pairs. Sending a message is a non-blocking operation. The message
-is guaranteed to be available to all members of the group, but some of
-them might never retrieve it.
-
-Usage: ``lava-send <message-id> [key1=val1 [key2=val2] ...]``
-
-Examples will be provided below, together with ``lava-wait`` and
-``lava-wait-all``.
-
-.. _lava_wait:
-
-lava-wait
----------
-
-Waits until any other device in the group sends a message with the given
-ID. This call will block until such message is sent.
-
-Usage: ``lava-wait <message-id>``
-
-If there was data passed in the message, the key-value pairs will be
-printed in the cache file(/tmp/lava_multi_node_cache.txt in default),
-each in one line. If no key values were passed, nothing is printed.
-
-The message ID data is persistent for the life of the MultiNode group.
-The data can be retrieved at any later stage using ``lava-wait`` and as
-the data is already available, there will be no waiting time for repeat
-calls. If devices continue to send data with the associated message ID,
-that data will continue to be added to the data for that message ID and
-will be returned by subsequent calls to ``lava-wait`` for that message
-ID. Use a different message ID to collate different message data.
-
-.. _lava_wait_all:
-
-lava-wait-all
--------------
-
-Waits until **all** other devices in the group send a message with the
-given message ID. If ``<role>`` is passed, only wait until all devices
-with that given role send a message.
-
-``lava-wait-all <message-id> [<role>]``
-
-If data was sent by the other devices with the message, the key-value
-pairs will be printed in the cache file (/tmp/lava_multi_node_cache.txt
-by default), each in one line, prefixed with the target name and
-a colon.
-
-Some examples for ``lava-send``, ``lava-wait`` and
-``lava-wait-all`` are given below.
-
-Using ``lava-sync`` or ``lava-wait-all`` in a test definition effectively
-makes all boards in the group run at the speed of the slowest board in
-the group up to the point where the sync or wait is called.
-
-Ensure that the message-id matches an existing call to ``lava-send`` for
-each relevant test definition **before** that test definition calls
-``lava-wait-all`` or any device using that test definition will wait forever
-(and eventually timeout, failing the job).
-
-The message returned can include data from other devices which sent a
-message with the relevant message ID, only the wait is dependent on
-particular devices with a specified role.
-
-As with ``lava-wait``, the message ID is persistent for the duration of
-the MultiNode group.
-
-.. _lava_sync:
-
-lava-sync
----------
-
-Global synchronization primitive. Sends a message, and waits for the
-same message from all of the other devices.
-
-Usage: ``lava-sync <message>``
-
-``lava-sync foo`` is effectively the same as ``lava-send foo`` followed
-by ``lava-wait-all foo``.
-
-.. _lava_network:
-
-lava-network
-------------
-
-Helper script to broadcast IP data from the test image, wait for data to be
-received by the rest of the group (or one role within the group) and then provide
-an interface to retrieve IP data about the group on the command line.
-
-Raising a suitable network interface is a job left for the designer of the test
-definition / image but once a network interface is available, ``lava-network``
-can be asked to broadcast this information to the rest of the group. At a later
-stage of the test, before the IP details of the group need to be used, call
-``lava-network collect`` to receive the same information about the rest of
-the group.
-
-All usage of lava-network needs to use a broadcast (which wraps a call to
-``lava-send``) and a collect (which wraps a call to ``lava-wait-all``). As a
-wrapper around ``lava-wait-all``, collect will block until the rest of the group
-(or devices in the group with the specified role) has made a broadcast.
-
-After the data has been collected, it can be queried for any board specified in
-the output of ``lava-group`` by specifying the parameter to query (as used in the
-broadcast)::
-
- lava-network query panda19 ipv4
- 192.168.3.56
-
- lava-network query beaglexm04 ipv6
- fe80::f2de:f1ff:fe46:8c21
-
- lava-network query arndale02 hostname
- server
-
- lava-network query panda14 hostname-full
- client.localdomain
-
- lava-network query panda19 netmask
- 255.255.255.0
-
- lava-network query panda14 default-gateway
- 192.168.1.1
-
- lava-network query panda17 dns_2
- 8.8.8.8
-
-``lava-network hosts`` can be used to output the list of all boards in the group
-which have returned a fully qualified domain name in a format suitable for
-``/etc/hosts``, appending to the specified file.
-
-Usage:
-
- broadcast: ``lava-network broadcast [interface]``
-
- collect:   ``lava-network collect [interface] <role>``
-
- query:     ``lava-network query [hostname] [option]``
-
- hosts:     ``lava-network hosts [file]``
-
-Example 1: simple client-server multi-node test
------------------------------------------------
-
-Two devices, with roles ``client``, ``server``
-
-LAVA Test Shell test definition (say, ``example1.yaml``)::
-
-    run:
-        steps:
-            - ./run-`lava-role`.sh
-
-The test image or the test definition would then provide two scripts,
-with only one being run on each device, according to the role specified.
-
-``run-server.sh``::
-
-    #!/bin/sh
-
-    iperf -s &
-    lava-send server-ready username=testuser
-    lava-wait client-done
-
-Notes:
-
-* To make use of the server-ready message, some kind of client
-  needs to do a ``lava-wait server-ready``
-* There needs to be a support on a client to do the
-  ``lava-send client-done`` or the wait will fail on the server.
-* If there was more than one client, the server could call
-  ``lava-wait-all client-done`` instead.
-
-
-``run-client.sh``::
-
-    #!/bin/sh
-
-    lava-wait server-ready
-    server=$(cat /tmp/lava_multi_node_cache.txt | cut -d = -f 2)
-    iperf -c $server
-    # ... do something with output ...
-    lava-send client-done
-
-Notes:
-
-* The client waits for the server-ready message as its first task,
-  then does some work, then sends a message so that the server can
-  move on and do other tests.
-
-Example 2: variable number of clients
--------------------------------------
-
-``run-server.sh``::
-
-    #!/bin/sh
-
-    start-server
-    lava-sync ready
-    lava-sync done
-
-``run-client.sh``::
-
-    #!/bin/sh
-
-    # refer to the server by name, assume internal DNS works
-    server=$(lava-group | grep 'server$' | cut -f 1)
-
-    lava-sync ready
-    run-client
-    lava-sync done
-
-Example 3: peer-to-peer application
------------------------------------
-
-Single role: ``peer``, any number of devices
-
-``run-peer.sh``::
-
-    #!/bin/sh
-
-    initialize-data
-    start-p2p-service
-    lava-sync running
-
-    push-data
-    for peer in $(lava-group | cut -f 1); do
-        if [ $peer != $(lava-self) ]; then
-            query-data $peer
-        fi
-    done
-
-
-Example 4: using lava-network
------------------------------
-
-If the available roles include ``server`` and there is a board named
-``database``::
-
-   #!/bin/sh
-   ifconfig eth0 up
-   # possibly do your own check that this worked
-   lava-network broadcast eth0
-   # do whatever other tasks may be suitable here, then wait...
-   lava-network collect eth0 server
-   # continue with tests and get the information.
-   lava-network query database ipv4

=== removed file 'doc/proxy.rst'
--- doc/proxy.rst	2012-05-07 04:43:52 +0000
+++ doc/proxy.rst	1970-01-01 00:00:00 +0000
@@ -1,105 +0,0 @@ 
-.. _proxy:
-
-Cache Proxy Setting Up
-======================
-
-Before, it used to use an internal cache mechanism for downloaded images and
-hwpacks to avoid downloading repeatedly, which could save time and bandwidth.
-
-lava-dispatcher switches to use cache proxy for managing cache files
-automatically. The recommended proxy is Squid.
-
-Install Squid 
-^^^^^^^^^^^^^
-
-Squid is easy to install via apt-get::
-
-    sudo apt-get install squid
-
-Or if you want a configurable squid, refer to the following link to compile
-and install manually: http://wiki.squid-cache.org/SquidFaq/CompilingSquid
-
-Configure Squid
-^^^^^^^^^^^^^^^
-
-You will need to customize according to your server, like disk layout, space.
-
-Need to analyse and tune by collecting information when squid running with
-real cases, like cache policy, file system.
-
-Mandatory configuration options
--------------------------------
-
-Based on original /etc/squid/squid.conf, see below tuning.
-
-* cache_dir ufs /var/spool/squid 30720 16 256
-
-  Mandatory option, please modify 30720(MB) to an available size.
-
-  There can be several cache directories on different disk, but it's better not
-  use RAID on the cache directories, it's recommended by Squid: The Definitive
-  Guide that it will always degrades fs performance for squid. 30720 is the
-  cache amount 30GB. 16 and 256 is Level 1 and 2 sub-directories, which is
-  default.
-
-* maximum_object_size 1024000 KB
-
-  Mandatory option.
-
-  Setting the value as 1024000KB makes the squid cache large files less than
-  1GB, for our images are usually a large one but less than 1G.
-
-Optional configuration options
-------------------------------
-
-Some others than mandatory options.
-
-* acl over_conn_limit maxconn 10  # make max connection limit 10
-
-* http_access allow localnet
-
-  Enable localnet, also, we need to define more localnet in server environment
-  to make sure all boards IP and other permitted clients are included.
-
-  acl localnet src 10.122.0.0/16
-
-* http_access deny over_conn_limit
-
-  Make max connection of one client less than 10, it should be enough for
-  a board, it can be increased.
-
-* cache_mem 128 MB
-
-  It can be increased if server MEM is enough, it's for squid mem amount for
-  objects.
-
-* cache_swap_low 90
-
-  cache_swap_high 95
-
-  Cache size will maintain between 90% to 95%. 
-
-* client_lifetime 12 hours
-
-  Make a client continuous accessing time 12hrs, default is 1 days, it can be
-  increased.
-
-* high_response_time_warning 2000
-
-  2s in 1mins no response will log in cache.log.
-
-* There is some email configurations to be set, like 'cache_mgr', it will send
-  mail if cache proxy dies.
-
-The configuration is only workable, there can be more improvement ways, some
-still need to tune on server.
-
-Other tuning
-------------
-
-Open files number can be increased for squid will need more than 1024
-limitations sometimes::
-
-    # ulimit -n
-        1024
-

=== removed file 'doc/sdmux.png'
Binary files doc/sdmux.png	2013-01-04 00:03:43 +0000 and doc/sdmux.png	1970-01-01 00:00:00 +0000 differ
=== removed file 'doc/sdmux.rst'
--- doc/sdmux.rst	2013-02-08 18:29:05 +0000
+++ doc/sdmux.rst	1970-01-01 00:00:00 +0000
@@ -1,129 +0,0 @@ 
-Configuring and Using the SD-Mux
-================================
-
-An sd-mux is a piece of hardware that's been created that allows a single
-SD card to be controlled by two different card readers. Access between the
-card readers is mutually exclusive and requires them to work in conjunction
-with each other. This provides an extremely useful way to deal with embedded
-system use cases for devices that boot from an SD card.
-
-LAVA uses sd-mux devices to allow running unmodified test images including
-bootloaders on test devices. This is a big improvement to the
-`master image`_ approach.
-
-.. _`master image`: http://lava.readthedocs.org/en/latest/lava-image-creation.html#preparing-a-master-image
-
-.. image:: sdmux.png
-
-Manual Usage
-------------
-
-Before deploying to LAVA, its probably best to understand the mechanics of
-the actual device and ensure its functioning. A setup like in the image above
-is assumed where:
-
- * the target end of the mux is plugged into a dev board
- * the host end is plugged into a USB SD card reader
- * the SD card reader is plugged into a USB hub that's plugged into the host
-
-With that in place, the device can be identified. The easiest way to do this
-is:
-
- * ensure the target device is off
- * cause a usb plug event on the host (unplug and plug the usb hub)
-
-At this point, "dmesg" should show what device this SD card appeared under
-like "/dev/sdb". Since these entries can change, the sd-mux code needs to know
-the actual USB device/port information. This can be found with the sdmux.sh
-script by running::
-
-  ./sdmux.sh -f /dev/sdb
-  Finding id for /dev/sdb
-  Device: /devices/pci0000:00/0000:00:1d.7/usb2/2-1/2-1.1
-  Bus ID: 2-1.1
-
-The key piece of information is "Bus ID: 2-1.1". This is required by the sdmux
-script to turn on/off the USB port with access to the device. To turn the
-device off which gives the target safe access run::
-
-  ID=2-1.1
-  ./sdmux -d $ID off
-
-At this point the target can be powered on and use the device. After powering
-off the target, the sd-card can be access on the host with::
-
-  ./sdmux -d $ID on
-
-This command will also print out the device entry like "/dev/sdb" to STDOUT
-
-Deploying in LAVA
------------------
-
-In order for the dispatcher's sd-mux driver to work a few fields must be added
-the device config::
-
-  # client_type required so that the sdmux driver will be used
-  client_type = sdmux
-  # this is the ID as discovered above using "sdmux.sh -f"
-  sdmux_id = 2-1.1
-  # sdmux_version is optional, but can be used to help identify which hardware
-  # revision this target is using.
-  sdmux_version = 0.01-dave_anders
-  # power on/off commands are also required
-  power_on_cmd = /usr/local/bin/pdu_power.sh 1 1
-  power_off_cmd = /usr/local/bin/pdu_power.sh 1 0
-
-About ADB
----------
-
-An issue has been discovered but not yet resolved upstream with ADB. The
-way the ADB daemon runs on the host prevents the sdmux.sh script from
-properly managing the device. Details and the proposed fix can be found
-here: https://android-review.googlesource.com/#/c/50011/
-
-Debugging Problems
-------------------
-
-Figuring out why things aren't working can be tricky. The key thing to keep
-in mind for the current revision of sd-mux hardware is:
-
- * The host can only access the sd-card if the target is powered off
- * The target can only access the sd-card if the host's sd-card reader isn't
-   supplying any current to the mux (ie the USB port should be off)
-
-Additionally, a tricky situation can arise if the host is providing some small
-amount of current. It seems u-boot can access the sd-card to pull the kernel.
-However, the kernel which tries to operate the sd-card at a higher speed will
-fail to mount the root file system.
-
-To really debug things you should open 2 terminals. Terminal 1 should be serial
-console session to your target. Terminal 2 can be used to toggle on/off target
-and toggle on/off the sdmux. Here's an example of how to play with things::
-
-  # from terminal 2, as root
-  export DEVICE=<your device id as described above>
-  alias muxon='<path to lava-dispatcher>/lava_dispatcher/device/sdmux.sh -d $DEVICE on'
-  alias muxoff='<path to lava-dispatcher>/lava_dispatcher/device/sdmux.sh -d $DEVICE off'
-  alias targeton='<your power on command>'
-  alias targetoff='<your power off command>'
-
-  # see if things work from the host
-  targetoff
-  muxon
-  # the muxon will print out where the device is, like /dev/sdb
-
-  # if you know your sd-card has partitions/files check with:
-  fdisk -l /dev/sdc
-
-  # now try mounting
-  mount /dev/sdc1 /mnt ; ls /mnt
-  umount /mnt
-
-  # see if this image will run on the target
-  muxoff
-  targeton
-
-  # at this point switch to terminal 1 to see if the device boots
-
-The steps above can basically get repeated over and over to help narrow down
-where things are breaking at.

=== removed file 'doc/standalonesetup.rst'
--- doc/standalonesetup.rst	2013-09-11 15:49:26 +0000
+++ doc/standalonesetup.rst	1970-01-01 00:00:00 +0000
@@ -1,49 +0,0 @@ 
-Quick Developer Setup
-=====================
-
-*NOTE:* You should most likely follow the `normal installation instructions </static/docs/deployment-tool.html>`_.
-However, these steps can get you a quick setup for local development on
-just the dispatcher::
-
-  # get code
-  $ sudo apt-get install python-virtualenv
-  $ bzr branch lp:lava-dispatcher
-  $ cd lava-dispatcher
-  # set up virtual environment for development
-  $ virtualenv .venv
-  $ . .venv/bin/activate
-  $ pip install keyring
-  $ ./setup.py develop
-  # setup configuration
-  $ mkdir .venv/etc
-  $ cp -r ./lava_dispatcher/default-config/lava-dispatcher .venv/etc/
-  $ cat >.venv/etc/lava-dispatcher/devices/qemu01.conf
-  device_type = qemu
-  $ echo "LAVA_IMAGE_TMPDIR = /tmp" >> .venv/etc/lava-dispatcher/lava-dispatcher.conf
-
-The set up a minimal job like::
-
-    # /tmp/qemu.json
-    {
-      "timeout": 18000,
-      "job_name": "qemu-test",
-      "device_type": "qemu",
-      "target": "qemu01",
-      "actions": [
-        {
-          "command": "deploy_linaro_image",
-          "parameters": {
-            "image": "file:///tmp/beagle-nano.img.gz"
-            }
-        },
-        {
-          "command": "boot_linaro_image"
-        }
-      ]
-    }
-
-And execute the dispatcher with::
-
-  $ lava-dispatch /tmp/qemu.json
-
-.. seealso:: For writing a new dispatcher job file see :ref:`jobfile`

=== removed file 'doc/usage.rst'
--- doc/usage.rst	2013-09-11 15:49:26 +0000
+++ doc/usage.rst	1970-01-01 00:00:00 +0000
@@ -1,34 +0,0 @@ 
-.. _usage:
-
-=====
-Usage
-=====
-
-Workflow Overview
-=================
-
-LAVA Dispatcher can be used in two different ways. One is standalone (without
-the LAVA Scheduler) and another is managed (when LAVA Dispatcher is controlled
-by the LAVA Scheduler).
-
-Standalone usage
-^^^^^^^^^^^^^^^^
-
-In standalone mode a human operator installs LAVA Dispatcher on some device
-(development board, laptop or other computer or a virtual machine), edits the
-job file that are to be executed and then executes them manually (by manually
-running LAVA Dispatcher, the actual execution process are non-interactive).
-
-Follow the `Quick Developer Setup`_ instructions to get started.
-
-.. _Quick Developer Setup: standalonesetup.html
-
-Usage with the LAVA Scheduler
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The scheduler is useful for automating LAVA Dispatcher environment setup, describing test scenarios (the list of tests to invoke) and finally storing the results in the LAVA dashboard.
-
-This scenario can be configured by following our `deployment instructions`_
-or the Documentation link on any LAVA instance.
-
-.. _deployment instructions: /static/docs/

=== removed file 'doc/usecaseone.rst'
--- doc/usecaseone.rst	2013-08-23 10:25:19 +0000
+++ doc/usecaseone.rst	1970-01-01 00:00:00 +0000
@@ -1,521 +0,0 @@ 
-.. _use_case_one:
-
-Use Case One - Setting up a simple client:server test definition.
-*****************************************************************
-
-One device needs to obtain / prepare some data and then make the data
-available to another device in the same group.
-
-Source Code
-===========
-
-* The YAML snippets in this example are not complete, for a working example of the code, see:
-
-  https://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=forwarder.yaml;hb=refs/heads/master
-
-  https://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=receiver.yaml;hb=refs/heads/master
-
-  https://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=json/beagleblack-use-case.json;hb=HEAD
-
-Requirements
-============
-
-1. A mechanism to obtain the data, presumably from some third-party source
-2. A sync to ensure that the file is ready to be offered to the other device
-
- 2.1. This ensures that the attempt to receive does not start early
-
-3. A message to the original board that the data has been received and verified
-
- 3.1. This ensures that any cleanup of the data does not happen before the transfer is complete.
-
-Methods
-=======
-
-* Install a package which can obtain the data from the third party source
-* Install a package which can provide the means to get the data to the other board
-
-Control flow
-============
-
-+------------------------------+----------------------------------------+
-|sender starts                 | receiver starts                        |
-+------------------------------+----------------------------------------+
-|sender obtains the data       | receiver waits for sender to be ready  |
-+------------------------------+----------------------------------------+
-|sender modifies the data      | wait                                   |
-+------------------------------+----------------------------------------+
-|sender notifies receiver      | wait                                   |
-+------------------------------+----------------------------------------+
-|sender waits for completion   | receiver initiates transfer            |
-+------------------------------+----------------------------------------+
-|wait                          | receiver notifies sender of completion |
-+------------------------------+----------------------------------------+
-|sender cleans up              | receiver processes the modified data   |
-+------------------------------+----------------------------------------+
-
-It is clear from the flow that the sender and the receiver are doing
-different things at different times and may well need different packages
-installed. The simplest way to manage this is to have two YAML files.
-
-In this example, sender is going to use wget to obtain the data and
-apache to offer it to the receiver. The receiver will only need wget.
-The example won't actually modify the data, but for the purposes of the
-example, the documentation will ignore the fact that the receiver could
-just get the data directly.
-
-Preparing the YAML
-==================
-
-The name field specified in the YAML will be used later as the basis
-of the filter. To start each YAML file, ensure that the metadata contains
-two metadata fields:
-
-* format : **Lava-Test Test Definition 1.0**
-* description : your own descriptive text
-
-It is useful to also add the maintainer field with your email address
-as this will be needed later if the test is to be added to one of the
-formal test sets.
-
-::
-
- metadata:
-    format: Lava-Test Test Definition 1.0
-    name: multinode-usecaseone
-    description: "MultiNode network test commands"
-    maintainer:
-        - neil.williams@linaro.org
-
-Installing packages for use in a test
--------------------------------------
-
-If your test image raises a usable network interface by default on boot,
-the YAML can specify a list of packages which need to be installed for
-this test definition:
-
-::
-
- install:
-    deps:
-        - wget
-        - apache2
-
-If your test needs to raise the network interface itself, the package
-installation will need to be done in the run steps::
-
- run:
-    steps:
-        - lava-test-case linux-linaro-ubuntu-route-ifconfig-up --shell ifconfig eth0 up
-        - lava-test-case apt-update --shell apt-get update
-        - lava-test-case install-deps --shell apt-get -y install wget apache2
-
-Note that although KVM devices can use apt, the network interface fails
-the LAVA test, so use the manual install steps for non-bridged KVM devices.
-
-Preparing the test to send data
--------------------------------
-
-``modify-data.sh`` would, presumably, unpack the data, modify it in
-some way and pack it back up again. In this example, it would be a no-op
-but note that it still needs to exist in the top level directory of your
-VCS repo and be executable.
-
-Any packages required by ``modify-data.sh`` need to be added to the install
-deps of sender.yaml. Providing useful contents of ``modify-data.sh`` is
-left as an exercise for the reader.
-
-Modification happens before the :ref:`lava_sync` ``download`` which tells the
-receiver that the data is ready to be transferred.
-
-The sender then waits for the receiver to acknowledge a correct download
-using :ref:`lava_sync` ``received`` and cleans up.
-
-sender.yaml
-^^^^^^^^^^^
-
-::
-
- install:
-    deps:
-        - wget
-        - apache2
-
- run:
-   steps:
-        - lava-test-case multinode-network --shell lava-network broadcast eth0
-        - lava-test-case wget-file --shell wget -O /var/www/testfile http://releases.linaro.org/latest/android/arndale/userdata.tar.bz2
-        - ./modify-data.sh
-        - lava-test-case file-sync --shell lava-sync download
-        - lava-test-case done-sync --shell lava-sync received
-        - lava-test-case remove-tgz --shell rm /var/www/testfile
-
-Handling the transfer to the receiver
--------------------------------------
-
-The receiver needs to know where to find the data. The sender can ensure that the
-file is in a particular location, it is up to the YAML to get the rest of the
-information of the network address of the sender. This example assumes that the
-data is modified in some undisclosed manner by the ``./modify-data.sh``
-script which is part of your testdef_repo before the receiver is notified.
-
-The LAVA :ref:`multinode_api` provides ways of querying the network information of devices
-within the group. In order to offer the data via apache, the sender needs to
-raise a suitable network interface, so it calls ifconfig as a lava test case
-first and then uses the lava-network API call to broadcast network information
-about itself.
-
-Equally, the receiver needs to raise a network interface, broadcast
-it's network information and then collect the network information for
-the group.
-
-Note that collect is a blocking call - each of the devices needs to
-broadcast before collect will return. (There is support for collecting
-data only for specific roles but that's outside the scope of this example.)
-
-receiver.yaml
-^^^^^^^^^^^^^
-
-::
-
- install:
-    deps:
-        - wget
-
- run:
-   steps:
-        - lava-test-case linux-linaro-ubuntu-route-ifconfig-up --shell ifconfig eth0 up
-        - lava-test-case multinode-network --shell lava-network broadcast eth0
-        - lava-test-case multinode-get-network --shell lava-network collect eth0
-        - lava-test-case file-sync --shell lava-sync download
-        - lava-test-case wget-from-group --shell ./get-data.sh
-        - lava-test-case get-sync --shell lava-sync received
-        - lava-test-case list-file --shell ls -l /tmp/testfile
-        - lava-test-case remove-file --shell rm /tmp/testfile
-
-
-The receiver then needs to obtain that network information and process
-it to get the full URL of the data. To do command line processing and
-pipes, a helper script is needed:
-
-get-data.sh
-^^^^^^^^^^^
-
-Always use **set -x** in any wrapper / helper scripts which you expect
-to use in a test run to be able to debug test failures.
-
-Ensure that the scripts are marked as executable in your VCS and
-that the appropriate interpreter is installed in your test image.
-
-::
-
- #!/bin/sh
- set -e
- set -x
- DEVICE=`lava-group | grep -m1 receiver|cut -f2`
- SOURCE=`lava-network query $DEVICE ipv4|grep -v LAVA|cut -d: -f2`
- wget -O /tmp/testfile http://${SOURCE}/testfile
-
-
-The ``$DEVICE`` simply matches the first device name in this group
-which contains the string 'receiver' (which comes from the ``role``
-specified in the JSON) and returns the full name of that device,
-e.g. multinode-kvm02 or staging-beagleblack03
-
-This device name is then passed to lava-network query to get the ipv4
-details of that device within this group. The value of ``$SOURCE``
-is an IPv4 address of the sender (assuming that your JSON has defined a
-role for the sender which would contain the 'receiver' string in the name.)
-
-Finally, ``get-data.sh`` does the work of receiving the data from
-the sender. The verification of the data is left as an exercise for
-the reader - one simple method would be for the sender to checksum the
-(modified) data and use ``lava-send`` to make that checksum available
-to devices within the group. The receiver can then use ``lava-wait``
-to get that checksum.
-
-Once ``get-data.sh`` returns, the receiver notifies the sender that
-the transfer is complete, processes the data as it sees fit and cleans up.
-
-Preparing the JSON
-===================
-
-The JSON ties the YAML test definition with the hardware and software to
-run the test definition. The JSON is also where multiple test
-definitions are combined into a single MultiNode test.
-
-General settings
-----------------
-
-.. warning:: **Timeout values need to be reduced from single node examples**
-
- - each synchronisation primitive uses the timeout from the general settings,
- - always check your timeout value - 900 is recommended.
-
-::
-
- {
-    "health_check": false,
-    "logging_level": "DEBUG",
-    "timeout": 900,
-    "job_name": "client-server test",
- }
-
-
-device_group
-^^^^^^^^^^^^
-
-The device_group collates the device-types and the role of each device
-type in the group along with the number of boards to allocate to each
-role.
-
-If count is larger than one, enough devices will be allocated to match
-the count and all such devices will have the same role and use the same
-commands and the same actions. (The job will be rejected if there are
-not enough devices available to satisfy the count.)
-
-::
-
- {
-    "device_group": [
-        {
-            "role": "sender",
-            "count": 1,
-            "device_type": "beaglebone-black",
-            "tags": [
-                "use-case-one"
-            ]
-        },
-        {
-            "role": "receiver",
-            "count": 1,
-            "device_type": "kvm",
-            "tags": [
-                "use-case-one"
-            ]
-        }
-    ],
- }
-
-
-actions
--------
-
-When mixing different device_types in one group, the images to deploy
-will probably vary, so use the role parameter to determine which image
-gets used on which board(s).
-
-deploy_linaro_image
-^^^^^^^^^^^^^^^^^^^
-
-::
-
- {
-    "actions": [
-        {
-            "command": "deploy_linaro_image",
-            "parameters": {
-                "image": "http://images.validation.linaro.org/kvm-debian-wheezy.img.gz",
-                "role": "receiver"
-            }
-        },
-        {
-            "command": "deploy_linaro_image",
-            "parameters": {
-                "image": "http://linaro-gateway/beaglebone/beaglebone_20130625-379.img.gz",
-                "role": "sender"
-            }
-        }
- }
-
-
-lava_test_shell
-^^^^^^^^^^^^^^^
-
-If specific actions should only be used for particular roles, add a role
-field to the parameters of the action.
-
-If any action has no role specified, it will be actioned for all roles.
-
-For Use Case One, we have a different YAML file for each role, so
-we have two lava_test_shell commands.
-
-::
-
- {
-        {
-            "command": "lava_test_shell",
-            "parameters": {
-                "testdef_repos": [
-                    {
-                        "git-repo": "git://git.linaro.org/people/neilwilliams/multinode-yaml.git",
-                        "testdef": "forwarder.yaml"
-                    }
-                ],
-                "role": "sender"
-            }
-        },
-        {
-            "command": "lava_test_shell",
-            "parameters": {
-                "testdef_repos": [
-                    {
-                        "git-repo": "git://git.linaro.org/people/neilwilliams/multinode-yaml.git",
-                        "testdef": "receiver.yaml"
-                    }
-                ],
-                "role": "receiver"
-            }
-        }
- }
-
-
-submit_results
-^^^^^^^^^^^^^^
-
-The results for the entire group get aggregated into a single result
-bundle. Ensure that the bundle stream exists on the specified server
-and that you have permission to add to that stream.
-
-::
-
- {
-        {
-            "command": "submit_results_on_host",
-            "parameters": {
-                "stream": "/anonymous/use-cases/",
-                "server": "http://validation.linaro.org/RPC2/"
-            }
-        }
- }
-
-Prepare a filter for the results
-================================
-
-Now decide how you are going to analyse the results of tests using
-this definition, using the name of the test definition specified in
-the YAML metadata.
-
-Unique names versus shared names
---------------------------------
-
-Each YAML file can have a different name or the name can be shared amongst
-many YAML files at which point those files form one test definition, irrespective
-of what each YAML file actually does. Sharing the name means that the results
-of the test definition always show up under the same test name. Whilst this
-can be useful, be aware that if you subsequently re-use one of the YAML files
-sharing a name in a test which does not use the other YAML files sharing
-the same name, there will be gaps in your data. When the filter is later
-used to prepare a graph, these gaps can make it look as if the test
-failed for a period of time when it was simply that the not all of the
-tests in the shared test definition were run.
-
-A single filter can combine the results of multiple tests, so it is
-generally more flexible to have a unique name in each YAML file and
-combine the tests in the filters.
-
-If you use a unique test definition name for every YAML file, ensure that
-each name is descriptive and relevant so that you can pick the right test
-definition from the list of all tests when preparing the filter. If you
-share test definition names, you will have a shorter list to search.
-
-Filters also allow results to be split by the device type and, in
-Multi-Node, by the role. Each of these parameters is defined by the JSON,
-not the YAML, so care is required when designing your filters to cover
-all uses of the test definition without hiding the data in a set of
-unrelated results.
-
-Create a filter
----------------
-
-To create or modify filters (and the graphs which can be based on them)
-you will need appropriate permissions on the LAVA instance to which are
-you submitting your JSON.
-
-On the website for the instance running the tests, click on Dashboard
-and Filters. If you have permissions, there will be a link entitled
-*Add new filter...*.
-
-The filter name should include most of the data about what this filter
-is intended to do, without whitespace. This name will be preserved through
-to the name of the graph based on this filter and can be changed later if
-necessary. Choose whether to make the filter public and select the bundle
-stream(s) to add into the filter.
-
-If the filter is to aggregate all results for a test across all
-devices and all roles, simply leave the *Attributes* empty. Otherwise,
-*Add a required attribute* and start typing to see the available fields.
-
-To filter by a particular device_type, choose **target.device_type**.
-
-To filter by a particular role (Multi-Node only), choose **role**.
-
-Click *Add a test* to get the list of test definition names for which
-results are available.
-
-Within a test definition, a filter can also select only particular test
-cases. In this Use Case, for example, the filter could choose only the
-``multinode-network``, ``multinode-get-network`` or ``file-sync``
-test cases. Continue to add tests and/or test cases - the more tests
-and/or test cases are added to the filter, the fewer results will
-match.
-
-Click the *Preview* button to apply the filter to the current set of
-results **without saving the filter**.
-
-In the preview, if there are columns with no data or rows with no data
-for specific columns, these will show up as missing data in the filter
-and in graphs based on this filter. This is an indication that you need
-to refine either the filter or the test definitions to get a cohesive
-set of results.
-
-If you are happy with the filter, click on save.
-
-The suggested filter for this use case would simply have a suitable name,
-no required attributes and a single test defined - using a shared name
-specified in each of the YAML files.
-
-::
-
- Bundle streams     /anonymous/instance-manager/
- Test cases         multinode-network 	any
-
-Prepare a graph based on the filter
-===================================
-
-A graph needs an image and the image needs to be part of an image set to
-be visible in the dashboard image reports. Currently, these steps need
-to be done by an admin for the instance concerned.
-
-Once the image exists and it has been added to an image set, changes in
-the filter will be reflected in the graph without the need for
-administrator changes.
-
-Each graph is the result of a single image which itself is basde on a
-single filter. Multiple images are collated into image sets.
-
-Summary
-=======
-
-The full version of this use case are available:
-
-http://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=json/kvm-beagleblack-group.json;hb=HEAD
-
-Example test results are visible here:
-
-http://multinode.validation.linaro.org/dashboard/image-reports/kvm-multinode
-
-http://multinode.validation.linaro.org/dashboard/streams/anonymous/instance-manager/bundles/da117e83d7b137930f98d44b8989dbe0f0c827a4/
-
-This example uses a kvm device as the receiver only because the test environment
-did not have a bridged configuration, so the internal networking of the kvm meant
-that although the KVM could connect to the beaglebone-black, the beaglebone-black
-could not connect to the kvm.
-
-https://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=json/beagleblack-use-case.json;hb=HEAD
-
-https://staging.validation.linaro.org/dashboard/image-reports/beagleblack-usecase
-
-https://staging.validation.linaro.org/dashboard/streams/anonymous/codehelp/bundles/cf4eb9e0022232e97aaec2737b3cd436cd37ab14/
-
-This example uses two beaglebone-black devices.

=== removed file 'doc/usecasetwo.rst'
--- doc/usecasetwo.rst	2013-08-23 10:25:41 +0000
+++ doc/usecasetwo.rst	1970-01-01 00:00:00 +0000
@@ -1,224 +0,0 @@ 
-.. _use_case_two:
-
-Use Case Two - Setting up the same job on multiple devices
-**********************************************************
-
-One test definition (or one set of test definitions) to be run on
-multiple devices of the same device type.
-
-Source Code
-===========
-
-The test definition itself could be an unchanged singlenode test definition, e.g.
-
- https://git.linaro.org/gitweb?p=qa/test-definitions.git;a=blob_plain;f=ubuntu/smoke-tests-basic.yaml;hb=refs/heads/master
-
-Alternatively, it could use the MultiNode API to synchronise the devices, e.g.
-
-  https://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=multinode01.yaml;hb=refs/heads/master
-
-  https://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=multinode02.yaml;hb=refs/heads/master
-
-  https://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=multinode03.yaml;hb=refs/heads/master
-
-Requirements
-============
-
- * Multiple devices running the same test definition.
- * Running multiple test definitions at the same time on all devices in the group.
- * Synchronising multiple devices during a test.
- * Filter the results by device name.
-
-Preparing the YAML
-==================
-
-In the first part of this use case, the same YAML file is to be used to
-test multiple devices. Select your YAML file and, if appropriate, edit
-the name in the metadata.
-
-Preparing the JSON
-===================
-
-The change from a standard single-node JSON file is to expand the device_type
-or device field to a device_group.
-
-The change for multiple devices in MultiNode is within the ``device_group``. To run the test
-multiple devices of the same type, simply increase the ``count``:
-
-::
-
- {
-    "device_group": [
-        {
-            "role": "bear",
-            "count": 2,
-            "device_type": "panda",
-            "tags": [
-                "use-case-two"
-            ]
-        }
- }
-
-If the rest of the JSON refers to a ``role`` other than the one specified
-in the ``device_group``, those JSON sections are ignored.
-
-If other actions in the JSON do not mention a ``role``, the action will
-occur on all devices in the ``device_group``. So with a single role,
-it only matters that a role exists in the ``device_group``.
-
-actions
--------
-
-::
-
- {
-        {
-            "command": "deploy_linaro_image",
-            "parameters": {
-                "image": "https://releases.linaro.org/13.03/ubuntu/panda/panda-quantal_developer_20130328-278.img.gz"
-            }
-           "role": "bear"
-        }
- }
-
-lava_test_shell
-^^^^^^^^^^^^^^^
-
-To run multiple test definitions from one or multiple testdef repositories,
-expand the testdef_repos array:
-
-.. tip:: Remember the JSON syntax.
-
- - continuations need commas, completions do not.
-
-::
-
- {
-        {
-            "command": "lava_test_shell",
-            "parameters": {
-                "testdef_repos": [
-                    {
-                        "git-repo": "git://git.linaro.org/people/neilwilliams/multinode-yaml.git",
-                        "testdef": "multinode01.yaml"
-                    },
-                    {
-                        "git-repo": "git://git.linaro.org/people/neilwilliams/multinode-yaml.git",
-                        "testdef": "multinode02.yaml"
-                    },
-                    {
-                        "git-repo": "git://git.linaro.org/people/neilwilliams/multinode-yaml.git",
-                        "testdef": "multinode03.yaml"
-                    }
-                ],
-                "role": "sender"
-            }
-        },
- }
-
-submit_results
-^^^^^^^^^^^^^^
-
-The results for the entire group get aggregated into a single result
-bundle.
-
-::
-
- {
-        {
-            "command": "submit_results_on_host",
-            "parameters": {
-                "stream": "/anonymous/instance-manager/",
-                "server": "http://validation.linaro.org/RPC2/"
-            }
-        }
- }
-
-Prepare a filter for the results
-================================
-
-The filter for this use case uses a ``required attribute``
-of **target.device_type** to only show results for the specified
-devices (to cover reuse of the YAML on other boards later).
-
-It is also possible to add a second filter which matches a specific **target**
-device.
-
-Adding synchronisation
-======================
-
-So far, the multiple devices have been started together but then had no
-further interaction.
-
-The :ref:`multinode_api` supports communication between devices within
-a group and provides synchronisation primitives. The simplest of these
-primitives, :ref:`lava_sync` was used in :ref:`use_case_one` but there are more
-possibilities available.
-
-:ref:`lava_sync` is a special case of a :ref:`lava_send` followed by a
-:ref:`lava_wait_all`.
-
-Sending messages
-----------------
-
-Messages can be sent using :ref:`lava_send` which is a non-blocking call.
-At a later point, another device in the group can collect the message
-using ``lava-wait`` or ``lava-wait-all`` which will block until
-the message is available.
-
-The message can be a simple identifier (e.g. 'download' or 'ready') and
-is visible to all devices in the group.
-
-Key value pairs can also be sent using the API to broadcast particular
-information.
-
-If multiple devices send the same message ID, the data is collated by
-the LAVA Coordinator. Key value pairs sent with any message ID are
-tagged with the device name which sent the key value pairs.
-
-Receiving messages
-------------------
-
-Message reception will block until the message is available.
-
-For :ref:`lava_wait`, the message is deemed available as soon as any device
-in the group has sent a message with the matching ID. If no devices have
-sent such a message, any device asking for ``lava-wait`` on that ID
-will block until a different board uses ``lava-send`` with the expected
-message ID.
-
-For :ref:`lava_wait_all`, the message is only deemed available if **all
-devices in the group** have already sent a message with the expected message
-ID. Therefore, using ``lava-wait-all`` requires a preceding
-``lava-send``.
-
-When using ``lava-wait-all MESSAGEID ROLE``, the message is only deemed
-available if **all devices with the matching role in the group** have
-sent a message with the expected message ID. If the receiving device has
-the specified role, that device must use a ``lava-send`` for the same
-message ID before using ``lava-wait-all MESSAGEID ROLE``.
-
-::
-
-        - lava-test-case multinode-send-network --shell lava-send ready
-        - lava-test-case multinode-get-network --shell lava-wait ready
-
-It is up to the test writer to ensure that when :ref:`lava_wait` is used,
-that the message ID is sufficiently unique that the first use of that
-message ID denotes the correct point in the YAML.
-
-::
-
-        - lava-test-case multinode-send-message --shell lava-send sending source=$(lava-self) role=$(lava-role) hostname=$(hostname -f) kernver=$(uname -r) kernhost=$(uname -n)
-        - lava-test-case multinode-wait-message --shell lava-wait-all sending
-
-This example will wait until all devices in the group have sent the
-message ID ''sending'' (with or without the associated key value pairs).
-
-Summary
-=======
-
-http://git.linaro.org/gitweb?p=people/neilwilliams/multinode-yaml.git;a=blob_plain;f=json/panda-only-group.json;hb=refs/heads/master
-
-http://multinode.validation.linaro.org/dashboard/image-reports/panda-multinode
-

=== removed directory 'lava'
=== removed file 'lava-dispatch'
--- lava-dispatch	2012-06-14 03:46:53 +0000
+++ lava-dispatch	1970-01-01 00:00:00 +0000
@@ -1,2 +0,0 @@ 
-#!/bin/bash
-exec lava dispatch "$@"

=== removed file 'lava/__init__.py'
--- lava/__init__.py	2012-03-19 13:29:52 +0000
+++ lava/__init__.py	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-__import__('pkg_resources').declare_namespace(__name__)
-# DO NOT ADD ANYTHING TO THIS FILE!
-# IT MUST STAY AS IS (empty apart from the two lines above)

=== removed directory 'lava/dispatcher'
=== removed file 'lava/dispatcher/__init__.py'
=== removed file 'lava/dispatcher/commands.py'
--- lava/dispatcher/commands.py	2013-08-28 14:55:50 +0000
+++ lava/dispatcher/commands.py	1970-01-01 00:00:00 +0000
@@ -1,174 +0,0 @@ 
-import argparse
-import json
-import logging
-import os
-import sys
-
-from json_schema_validator.errors import ValidationError
-from lava.tool.command import Command
-from lava.tool.errors import CommandError
-from lava.dispatcher.node import NodeDispatcher
-import lava_dispatcher.config
-from lava_dispatcher.config import get_config, get_device_config, get_devices
-from lava_dispatcher.job import LavaTestJob, validate_job_data
-
-
-class SetUserConfigDirAction(argparse.Action):
-    def __call__(self, parser, namespace, value, option_string=None):
-        lava_dispatcher.config.custom_config_path = value
-
-
-class DispatcherCommand(Command):
-    @classmethod
-    def register_arguments(cls, parser):
-        super(DispatcherCommand, cls).register_arguments(parser)
-        parser.add_argument(
-            "--config-dir",
-            default=None,
-            action=SetUserConfigDirAction,
-            help="Configuration directory override (currently %(default)s")
-
-
-class devices(DispatcherCommand):
-    """
-    Lists all the configured devices in this LAVA instance.
-    """
-    def invoke(self):
-        for d in get_devices():
-            print d.hostname
-
-
-class dispatch(DispatcherCommand):
-    """
-    Run test scenarios on virtual and physical hardware
-    """
-
-    @classmethod
-    def register_arguments(cls, parser):
-        super(dispatch, cls).register_arguments(parser)
-        parser.add_argument(
-            "--oob-fd",
-            default=None,
-            type=int,
-            help="Used internally by LAVA scheduler.")
-        parser.add_argument(
-            "--output-dir",
-            default=None,
-            help="Directory to put structured output in.")
-        parser.add_argument(
-            "--validate", action='store_true',
-            help="Just validate the job file, do not execute any steps.")
-        parser.add_argument(
-            "--job-id", action='store', default=None,
-            help=("Set the scheduler job identifier. "
-                  "This alters process name for easier debugging"))
-        parser.add_argument(
-            "job_file",
-            metavar="JOB",
-            help="Test scenario file")
-        parser.add_argument(
-            "--target",
-            default=None,
-            help="Run the job on a specific target device"
-        )
-
-    def invoke(self):
-
-        if os.getuid() != 0:
-            logging.error("lava dispatch has to be run as root")
-            exit(1)
-
-        if self.args.oob_fd:
-            oob_file = os.fdopen(self.args.oob_fd, 'w')
-        else:
-            oob_file = sys.stderr
-
-        # config the python logging
-        # FIXME: move to lava-tool
-        # XXX: this is horrible, but: undo the logging setup lava-tool has
-        # done.
-        del logging.root.handlers[:]
-        del logging.root.filters[:]
-        FORMAT = '<LAVA_DISPATCHER>%(asctime)s %(levelname)s: %(message)s'
-        DATEFMT = '%Y-%m-%d %I:%M:%S %p'
-        logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
-        config = get_config()
-        logging.root.setLevel(config.logging_level)
-
-        # Set process id if job-id was passed to dispatcher
-        if self.args.job_id:
-            try:
-                from setproctitle import getproctitle, setproctitle
-            except ImportError:
-                logging.warning(
-                    ("Unable to set import 'setproctitle', "
-                     "process name cannot be changed"))
-            else:
-                setproctitle("%s [job: %s]" % (
-                    getproctitle(), self.args.job_id))
-
-        # Load the scenario file
-        with open(self.args.job_file) as stream:
-            jobdata = stream.read()
-            json_jobdata = json.loads(jobdata)
-
-        # detect multinode and start a NodeDispatcher to work with the LAVA Coordinator.
-        if not self.args.validate:
-            if 'target_group' in json_jobdata:
-                node = NodeDispatcher(json_jobdata, oob_file, self.args.output_dir)
-                node.run()
-                # the NodeDispatcher has started and closed.
-                exit(0)
-        if self.args.target is None:
-            if 'target' not in json_jobdata:
-                logging.error("The job file does not specify a target device. "
-                              "You must specify one using the --target option.")
-                exit(1)
-        else:
-            json_jobdata['target'] = self.args.target
-            jobdata = json.dumps(json_jobdata)
-        if self.args.output_dir and not os.path.isdir(self.args.output_dir):
-            os.makedirs(self.args.output_dir)
-        job = LavaTestJob(jobdata, oob_file, config, self.args.output_dir)
-
-        #FIXME Return status
-        if self.args.validate:
-            try:
-                validate_job_data(job.job_data)
-            except ValidationError as e:
-                print e
-        else:
-            job.run()
-
-
-class DeviceCommand(DispatcherCommand):
-
-    @classmethod
-    def register_arguments(cls, parser):
-        super(DeviceCommand, cls).register_arguments(parser)
-        parser.add_argument('device')
-
-    @property
-    def device_config(self):
-        try:
-            return get_device_config(self.args.device)
-        except Exception:
-            raise CommandError("no such device: %s" % self.args.device)
-
-
-class connect(DeviceCommand):
-
-    def invoke(self):
-        os.execlp(
-            'sh', 'sh', '-c', self.device_config.connection_command)
-
-
-class power_cycle(DeviceCommand):
-
-    def invoke(self):
-        command = self.device_config.hard_reset_command
-        if not command:
-            raise CommandError(
-                "%s does not have a power cycle command configured" %
-                self.args.device)
-        os.system(command)

=== removed file 'lava/dispatcher/node.py'
--- lava/dispatcher/node.py	2013-09-05 13:22:41 +0000
+++ lava/dispatcher/node.py	1970-01-01 00:00:00 +0000
@@ -1,411 +0,0 @@ 
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-#  node.py
-#
-#  Copyright 2013 Linaro Limited
-#  Author Neil Williams <neil.williams@linaro.org>
-#
-#  This program is free software; you can redistribute it and/or modify
-#  it under the terms of the GNU General Public License as published by
-#  the Free Software Foundation; either version 2 of the License, or
-#  (at your option) any later version.
-#
-#  This program is distributed in the hope that it will be useful,
-#  but WITHOUT ANY WARRANTY; without even the implied warranty of
-#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-#  You should have received a copy of the GNU General Public License
-#  along with this program; if not, write to the Free Software
-#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
-#  MA 02110-1301, USA.
-#
-#
-
-import socket
-from socket import gethostname
-import json
-import logging
-import os
-import copy
-import sys
-import time
-from lava_dispatcher.config import get_config
-from lava_dispatcher.job import LavaTestJob
-
-
-class Poller(object):
-    """
-    Blocking, synchronous socket poller which repeatedly tries to connect
-    to the Coordinator, get a very fast response and then implement the
-    wait.
-    If the node needs to wait, it will get a {"response": "wait"}
-    If the node should stop polling and send data back to the board, it will
-    get a {"response": "ack", "message": "blah blah"}
-    """
-
-    json_data = None
-    blocks = 4 * 1024
-    # how long between polls (in seconds)
-    poll_delay = 1
-    timeout = 0
-
-    def __init__(self, data_str):
-        try:
-            self.json_data = json.loads(data_str)
-        except ValueError:
-            logging.error("bad JSON")
-            exit(1)
-        if 'port' not in self.json_data:
-            logging.error("Misconfigured NodeDispatcher - port not specified")
-        if 'blocksize' not in self.json_data:
-            logging.error("Misconfigured NodeDispatcher - blocksize not specified")
-        self.blocks = int(self.json_data['blocksize'])
-        if "poll_delay" in self.json_data:
-            self.poll_delay = int(self.json_data["poll_delay"])
-        if 'timeout' in self.json_data:
-            self.timeout = self.json_data['timeout']
-
-    def poll(self, msg_str):
-        """
-        Blocking, synchronous polling of the Coordinator on the configured port.
-        Single send operations greater than 0xFFFF are rejected to prevent truncation.
-        :param msg_str: The message to send to the Coordinator, as a JSON string.
-        :return: a JSON string of the response to the poll
-        """
-        # starting value for the delay between polls
-        delay = 1
-        msg_len = len(msg_str)
-        if msg_len > 0xFFFE:
-            logging.error("Message was too long to send!")
-            return
-        c = 0
-        response = None
-        while True:
-            c += self.poll_delay
-            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-            try:
-                s.connect((self.json_data['host'], self.json_data['port']))
-                logging.debug("Connecting to LAVA Coordinator on %s:%s" % (self.json_data['host'], self.json_data['port']))
-                delay = self.poll_delay
-            except socket.error as e:
-                logging.warn("socket error on connect: %d %s %s" %
-                             (e.errno, self.json_data['host'], self.json_data['port']))
-                time.sleep(delay)
-                delay += 2
-                s.close()
-                continue
-            logging.debug("sending message: %s" % msg_str[:42])
-            # blocking synchronous call
-            try:
-                # send the length as 32bit hexadecimal
-                ret_bytes = s.send("%08X" % msg_len)
-                if ret_bytes == 0:
-                    logging.debug("zero bytes sent for length - connection closed?")
-                    continue
-                ret_bytes = s.send(msg_str)
-                if ret_bytes == 0:
-                    logging.debug("zero bytes sent for message - connection closed?")
-                    continue
-            except socket.error as e:
-                logging.warn("socket error '%d' on send" % e.message)
-                s.close()
-                continue
-            s.shutdown(socket.SHUT_WR)
-            try:
-                header = s.recv(8)  # 32bit limit as a hexadecimal
-                if not header or header == '':
-                    logging.debug("empty header received?")
-                    continue
-                msg_count = int(header, 16)
-                recv_count = 0
-                response = ''
-                while recv_count < msg_count:
-                    response += s.recv(self.blocks)
-                    recv_count += self.blocks
-            except socket.error as e:
-                logging.warn("socket error '%d' on response" % e.errno)
-                s.close()
-                continue
-            s.close()
-            if not response:
-                time.sleep(delay)
-                # if no response, wait and try again
-                logging.debug("failed to get a response, setting a wait")
-                response = json.dumps({"response": "wait"})
-            try:
-                json_data = json.loads(response)
-            except ValueError:
-                logging.error("response starting '%s' was not JSON" % response[:42])
-                break
-            if json_data['response'] != 'wait':
-                break
-            else:
-                if not (c % int(10 * self.poll_delay)):
-                    logging.info("Waiting ... %d of %d secs" % (c, self.timeout))
-                time.sleep(delay)
-            # apply the default timeout to each poll operation.
-            if c > self.timeout:
-                response = json.dumps({"response": "nack"})
-                break
-        return response
-
-
-def readSettings(filename):
-    """
-    NodeDispatchers need to use the same port and blocksize as the Coordinator,
-    so read the same conffile.
-    The protocol header is hard-coded into the server & here.
-    """
-    settings = {
-        "port": 3079,
-        "blocksize": 4 * 1024,
-        "poll_delay": 1,
-        "coordinator_hostname": "localhost"
-    }
-    with open(filename) as stream:
-        jobdata = stream.read()
-        json_default = json.loads(jobdata)
-    if "port" in json_default:
-        settings['port'] = json_default['port']
-    if "blocksize" in json_default:
-        settings['blocksize'] = json_default["blocksize"]
-    if "poll_delay" in json_default:
-        settings['poll_delay'] = json_default['poll_delay']
-    if "coordinator_hostname" in json_default:
-        settings['coordinator_hostname'] = json_default['coordinator_hostname']
-    return settings
-
-
-class NodeDispatcher(object):
-
-    group_name = ''
-    client_name = ''
-    group_size = 0
-    target = ''
-    role = ''
-    poller = None
-    oob_file = sys.stderr
-    output_dir = None
-    base_msg = None
-    json_data = None
-
-    def __init__(self, json_data, oob_file=sys.stderr, output_dir=None):
-        """
-        Parse the modified JSON to identify the group name,
-        requested port for the group - node comms
-        and get the designation for this node in the group.
-        """
-        settings = readSettings("/etc/lava-coordinator/lava-coordinator.conf")
-        self.json_data = json_data
-        # FIXME: do this with a schema once the API settles
-        if 'target_group' not in json_data:
-            raise ValueError("Invalid JSON to work with the MultiNode Coordinator: no target_group.")
-        self.group_name = json_data['target_group']
-        if 'group_size' not in json_data:
-            raise ValueError("Invalid JSON to work with the Coordinator: no group_size")
-        self.group_size = json_data["group_size"]
-        if 'target' not in json_data:
-            raise ValueError("Invalid JSON for a child node: no target designation.")
-        self.target = json_data['target']
-        if 'timeout' not in json_data:
-            raise ValueError("Invalid JSON - no default timeout specified.")
-        if "sub_id" not in json_data:
-            logging.info("Error in JSON - no sub_id specified. Results cannot be aggregated.")
-            json_data['sub_id'] = None
-        if 'port' in json_data:
-            # lava-coordinator provides a conffile for the port and blocksize.
-            logging.debug("Port is no longer supported in the incoming JSON. Using %d" % settings["port"])
-        if 'role' in json_data:
-            self.role = json_data['role']
-        # hostname of the server for the connection.
-        if 'hostname' in json_data:
-            # lava-coordinator provides a conffile for the group_hostname
-            logging.debug("Coordinator hostname is no longer supported in the incoming JSON. Using %s"
-                          % settings['coordinator_hostname'])
-        self.base_msg = {"port": settings['port'],
-                         "blocksize": settings['blocksize'],
-                         "step": settings["poll_delay"],
-                         "timeout": json_data['timeout'],
-                         "host": settings['coordinator_hostname'],
-                         "client_name": json_data['target'],
-                         "group_name": json_data['target_group'],
-                         # hostname here is the node hostname, not the server.
-                         "hostname": gethostname(),
-                         "role": self.role,
-                         }
-        self.client_name = json_data['target']
-        self.poller = Poller(json.dumps(self.base_msg))
-        self.oob_file = oob_file
-        self.output_dir = output_dir
-
-    def run(self):
-        """
-        Initialises the node into the group, registering the group if necessary
-        (via group_size) and *waiting* until the rest of the group nodes also
-        register before starting the actual job,
-        """
-        init_msg = {"request": "group_data", "group_size": self.group_size}
-        init_msg.update(self.base_msg)
-        logging.info("Starting Multi-Node communications for group '%s'" % self.group_name)
-        logging.debug("init_msg %s" % json.dumps(init_msg))
-        response = json.loads(self.poller.poll(json.dumps(init_msg)))
-        logging.info("Starting the test run for %s in group %s" % (self.client_name, self.group_name))
-        self.run_tests(self.json_data, response)
-        # send a message to the GroupDispatcher to close the group (when all nodes have sent fin_msg)
-        fin_msg = {"request": "clear_group", "group_size": self.group_size}
-        fin_msg.update(self.base_msg)
-        logging.debug("fin_msg %s" % json.dumps(fin_msg))
-        self.poller.poll(json.dumps(fin_msg))
-
-    def __call__(self, args):
-        """ Makes the NodeDispatcher callable so that the test shell can send messages just using the
-        NodeDispatcher object.
-        This function blocks until the specified API call returns. Some API calls may involve a
-        substantial period of polling.
-        :param args: JSON string of the arguments of the API call to make
-        :return: A Python object containing the reply dict from the API call
-        """
-        try:
-            return self._select(json.loads(args))
-        except KeyError:
-            logging.warn("Unable to handle request for: %s" % args)
-
-    def _select(self, json_data):
-        """ Determines which API call has been requested, makes the call, blocks and returns the reply.
-        :param json_data: Python object of the API call
-        :return: Python object containing the reply dict.
-        """
-        reply_str = ''
-        if not json_data:
-            logging.debug("Empty args")
-            return
-        if 'request' not in json_data:
-            logging.debug("Bad call")
-            return
-        if json_data["request"] == "aggregate":
-            # no message processing here, just the bundles.
-            return self._aggregation(json_data)
-        messageID = json_data['messageID']
-        if json_data['request'] == "lava_sync":
-            logging.info("requesting lava_sync '%s'" % messageID)
-            reply_str = self.request_sync(messageID)
-        elif json_data['request'] == 'lava_wait':
-            logging.info("requesting lava_wait '%s'" % messageID)
-            reply_str = self.request_wait(messageID)
-        elif json_data['request'] == 'lava_wait_all':
-            if 'role' in json_data and json_data['role'] is not None:
-                reply_str = self.request_wait_all(messageID, json_data['role'])
-                logging.info("requesting lava_wait_all '%s' '%s'" % (messageID, json_data['role']))
-            else:
-                logging.info("requesting lava_wait_all '%s'" % messageID)
-                reply_str = self.request_wait_all(messageID)
-        elif json_data['request'] == "lava_send":
-            logging.info("requesting lava_send %s" % messageID)
-            reply_str = self.request_send(messageID, json_data['message'])
-        reply = json.loads(str(reply_str))
-        if 'message' in reply:
-            return reply['message']
-        else:
-            return reply['response']
-
-    def _aggregation(self, json_data):
-        """ Internal call to send the bundle message to the coordinator so that the node
-        with sub_id zero will get the complete bundle and everyone else a blank bundle.
-        :param json_data: Arbitrary data from the job which will form the result bundle
-        """
-        if json_data["bundle"] is None:
-            logging.info("Notifying LAVA Coordinator of job completion")
-        else:
-            logging.info("Passing results bundle to LAVA Coordinator.")
-        reply_str = self._send(json_data)
-        reply = json.loads(str(reply_str))
-        if 'message' in reply:
-            return reply['message']
-        else:
-            return reply['response']
-
-    def _send(self, msg):
-        """ Internal call to perform the API call via the Poller.
-        :param msg: The call-specific message to be wrapped in the base_msg primitive.
-        :return: Python object of the reply dict.
-        """
-        new_msg = copy.deepcopy(self.base_msg)
-        new_msg.update(msg)
-        if 'bundle' in new_msg:
-            logging.debug("sending result bundle")
-        else:
-            logging.debug("sending Message %s" % json.dumps(new_msg))
-        return self.poller.poll(json.dumps(new_msg))
-
-    def request_wait_all(self, messageID, role=None):
-        """
-        Asks the Coordinator to send back a particular messageID
-        and blocks until that messageID is available for all nodes in
-        this group or all nodes with the specified role in this group.
-        """
-        # FIXME: if this node has not called request_send for the
-        # messageID used for a wait_all, the node should log a warning
-        # of a broken test definition.
-        if role:
-            return self._send({"request": "lava_wait_all",
-                              "messageID": messageID,
-                              "waitrole": role})
-        else:
-            return self._send({"request": "lava_wait_all",
-                              "messageID": messageID})
-
-    def request_wait(self, messageID):
-        """
-        Asks the Coordinator to send back a particular messageID
-        and blocks until that messageID is available for this node
-        """
-        # use self.target as the node ID
-        wait_msg = {"request": "lava_wait",
-                    "messageID": messageID,
-                    "nodeID": self.target}
-        return self._send(wait_msg)
-
-    def request_send(self, messageID, message):
-        """
-        Sends a message to the group via the Coordinator. The
-        message is guaranteed to be available to all members of the
-        group. The message is only picked up when a client in the group
-        calls lava_wait or lava_wait_all.
-        The message needs to be formatted JSON, not a simple string.
-        { "messageID": "string", "message": { "key": "value"} }
-        The message can consist of just the messageID:
-        { "messageID": "string" }
-        """
-        send_msg = {"request": "lava_send",
-                    "messageID": messageID,
-                    "message": message}
-        return self._send(send_msg)
-
-    def request_sync(self, msg):
-        """
-        Creates and send a message requesting lava_sync
-        """
-        sync_msg = {"request": "lava_sync", "messageID": msg}
-        return self._send(sync_msg)
-
-    def run_tests(self, json_jobdata, group_data):
-        if 'response' in group_data and group_data['response'] == 'nack':
-            logging.error("Unable to initiliase a Multi-Node group - timed out waiting for other devices.")
-            return
-        config = get_config()
-        if 'logging_level' in json_jobdata:
-            logging.root.setLevel(json_jobdata["logging_level"])
-        else:
-            logging.root.setLevel(config.logging_level)
-        if 'target' not in json_jobdata:
-            logging.error("The job file does not specify a target device.")
-            exit(1)
-        jobdata = json.dumps(json_jobdata)
-        if self.output_dir and not os.path.isdir(self.output_dir):
-            os.makedirs(self.output_dir)
-        job = LavaTestJob(jobdata, self.oob_file, config, self.output_dir)
-        # pass this NodeDispatcher down so that the lava_test_shell can __call__ nodeTransport to write a message
-        job.run(self, group_data)

=== removed directory 'lava_dispatcher'
=== removed file 'lava_dispatcher/__init__.py'
--- lava_dispatcher/__init__.py	2013-08-19 10:36:46 +0000
+++ lava_dispatcher/__init__.py	1970-01-01 00:00:00 +0000
@@ -1,21 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-__version__ = (0, 33, 1, "dev", 0)

=== removed directory 'lava_dispatcher/actions'
=== removed file 'lava_dispatcher/actions/__init__.py'
--- lava_dispatcher/actions/__init__.py	2013-07-18 14:01:21 +0000
+++ lava_dispatcher/actions/__init__.py	1970-01-01 00:00:00 +0000
@@ -1,98 +0,0 @@ 
-#!/usr/bin/python
-
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from glob import glob
-import imp
-import os
-
-from json_schema_validator.schema import Schema
-from json_schema_validator.validator import Validator
-
-
-null_or_empty_schema = {
-    'type': ['object', 'null'],
-    'additionalProperties': False,
-}
-
-
-class classproperty(object):
-    """Like the builtin @property, but binds to the class not instances."""
-
-    def __init__(self, func):
-        self.func = func
-
-    def __get__(self, ob, cls):
-        return self.func(cls)
-
-
-class BaseAction(object):
-
-    def __init__(self, context):
-        self.context = context
-
-    @property
-    def client(self):
-        return self.context.client
-
-    @classproperty
-    def command_name(cls):
-        cls_name = cls.__name__
-        if cls_name.startswith('cmd_'):
-            return cls_name[4:]
-        else:
-            # This should never happen.  But it's not clear that raising an
-            # AssertionError from this point would be useful either.
-            return cls_name
-
-    def test_name(self, **params):
-        return self.command_name
-
-    param_schema = None
-
-    @classmethod
-    def validate_parameters(cls, params):
-        if cls.parameters_schema:
-            if params is None:
-                params = {}
-            schema = Schema(cls.parameters_schema)
-            Validator.validate(schema, params)
-
-
-def _find_commands(module):
-    cmds = {}
-    for name, cls in module.__dict__.iteritems():
-        if name.startswith("cmd_"):
-            cmds[cls.command_name] = cls
-    return cmds
-
-
-def get_all_cmds():
-    import pkg_resources
-    cmds = {}
-    cmd_path = os.path.dirname(os.path.realpath(__file__))
-    for f in glob(os.path.join(cmd_path, "*.py")):
-        module = imp.load_source("module", os.path.join(cmd_path, f))
-        cmds.update(_find_commands(module))
-    for ep in pkg_resources.iter_entry_points(group="lava_dispatcher.actions"):
-        plugin = ep.load()
-        cmds[plugin.command_name] = plugin
-    return cmds

=== removed file 'lava_dispatcher/actions/android_install_binaries.py'
--- lava_dispatcher/actions/android_install_binaries.py	2012-11-09 20:26:45 +0000
+++ lava_dispatcher/actions/android_install_binaries.py	1970-01-01 00:00:00 +0000
@@ -1,37 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses>.
-
-import logging
-
-from lava_dispatcher.actions import BaseAction, null_or_empty_schema
-
-
-class cmd_android_install_binaries(BaseAction):
-
-    parameters_schema = null_or_empty_schema
-
-    def run(self):
-        driver_tarball = self.client.config.android_binary_drivers
-        partition = self.client.config.root_part
-
-        if driver_tarball is None:
-            logging.error("android_binary_drivers not defined in any config")
-            return
-
-        self.client.target_device.extract_tarball(driver_tarball, partition)

=== removed file 'lava_dispatcher/actions/android_install_cts_medias.py'
--- lava_dispatcher/actions/android_install_cts_medias.py	2013-07-16 15:59:41 +0000
+++ lava_dispatcher/actions/android_install_cts_medias.py	1970-01-01 00:00:00 +0000
@@ -1,43 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses>.
-
-import logging
-from lava_dispatcher.actions import BaseAction
-
-
-class cmd_android_install_cts_medias(BaseAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'media_url': {'type': 'string', 'optional': True},
-            'timeout': {'type': 'integer', 'optional': True},
-        },
-        'additionalProperties': False,
-    }
-
-    def run(self, media_url=None, timeout=2400):
-        if not media_url:
-            media_url = self.client.config.cts_media_url
-        if not media_url:
-            logging.error("The url for the cts media files is not specified")
-            return
-
-        partition = self.client.config.sdcard_part_android_org
-        self.client.target_device.extract_tarball(media_url, partition)

=== removed file 'lava_dispatcher/actions/boot_control.py'
--- lava_dispatcher/actions/boot_control.py	2013-09-05 18:05:56 +0000
+++ lava_dispatcher/actions/boot_control.py	1970-01-01 00:00:00 +0000
@@ -1,117 +0,0 @@ 
-#!/usr/bin/python
-
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import logging
-
-from lava_dispatcher.actions import BaseAction, null_or_empty_schema
-from lava_dispatcher.errors import (
-    CriticalError,
-    ADBConnectError,
-)
-
-_boot_schema = {
-    'type': 'object',
-    'properties': {
-        'options': {'type': 'array', 'items': {'type': 'string'},
-                    'optional': True},
-    },
-    'additionalProperties': False,
-}
-
-
-class cmd_boot_linaro_android_image(BaseAction):
-    """ Call client code to boot to the master image
-    """
-
-    parameters_schema = _boot_schema
-    parameters_schema['properties']['adb_check'] = {
-        'default': False, 'optional': True
-    }
-    parameters_schema['properties']['wait_for_home_screen'] = {
-        'default': False, 'optional': True
-    }
-    parameters_schema['properties']['wait_for_home_screen_activity'] = {
-        'type': 'string', 'optional': True
-    }
-    parameters_schema['properties']['interactive_boot_cmds'] = {
-        'default': False, 'optional': True
-    }
-
-    def run(self, options=[], adb_check=False,
-            wait_for_home_screen=True, wait_for_home_screen_activity=None,
-            interactive_boot_cmds=False):
-        client = self.client
-        if interactive_boot_cmds:
-            client.config.boot_cmds = options
-        else:
-            client.target_device.boot_options = options
-        if wait_for_home_screen_activity is not None:
-            client.config.android_wait_for_home_screen_activity = \
-             wait_for_home_screen_activity
-        client.config.android_wait_for_home_screen = wait_for_home_screen
-        try:
-            client.boot_linaro_android_image(
-                adb_check=adb_check)
-        except ADBConnectError as err:
-            logging.exception(('boot_linaro_android_image failed to create'
-                               ' the adb connection: %s') % err)
-            raise err
-        except Exception as e:
-            logging.exception("boot_linaro_android_image failed: %s" % e)
-            raise CriticalError("Failed to boot test image.")
-
-
-class cmd_boot_linaro_image(BaseAction):
-    """ Call client code to boot to the test image
-    """
-
-    parameters_schema = _boot_schema
-    parameters_schema['properties']['interactive_boot_cmds'] = {
-        'default': False, 'optional': True
-    }
-
-    def run(self, options=[], interactive_boot_cmds=False):
-        client = self.client
-        if interactive_boot_cmds:
-            client.config.boot_cmds = options
-        else:
-            client.target_device.boot_options = options
-        status = 'pass'
-        try:
-            client.boot_linaro_image()
-        except:
-            logging.exception("boot_linaro_image failed")
-            status = 'fail'
-            raise CriticalError("Failed to boot test image.")
-        finally:
-            self.context.test_data.add_result("boot_image", status)
-
-
-class cmd_boot_master_image(BaseAction):
-    """ Call client code to boot to the master image
-    """
-
-    parameters_schema = null_or_empty_schema
-
-    def run(self):
-        client = self.client
-        client.boot_master_image()

=== removed file 'lava_dispatcher/actions/deploy.py'
--- lava_dispatcher/actions/deploy.py	2013-09-10 14:29:53 +0000
+++ lava_dispatcher/actions/deploy.py	1970-01-01 00:00:00 +0000
@@ -1,139 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses>.
-
-from lava_dispatcher.actions import BaseAction
-
-
-class cmd_deploy_linaro_image(BaseAction):
-
-    # This is how the schema for parameters should look, but there are bugs in
-    # json_schema_validation that means it doesn't work (see
-    # https://github.com/zyga/json-schema-validator/pull/6).
-
-    ## parameters_schema = {
-    ##     'type': [
-    ##         {
-    ##             'type': 'object',
-    ##             'properties': {
-    ##                 'image': {'type': 'string'},
-    ##                 },
-    ##             'additionalProperties': False,
-    ##             },
-    ##         {
-    ##             'type': 'object',
-    ##             'properties': {
-    ##                 'hwpack': {'type': 'string'},
-    ##                 'rootfs': {'type': 'string'},
-    ##                 'rootfstype': {'type': 'string', 'optional': True, 'default': 'ext3'},
-    ##                 },
-    ##             'additionalProperties': False,
-    ##             },
-    ##         ],
-    ##     }
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'hwpack': {'type': 'string', 'optional': True},
-            'rootfs': {'type': 'string', 'optional': True},
-            'image': {'type': 'string', 'optional': True},
-            'rootfstype': {'type': 'string', 'optional': True},
-            'bootloadertype': {'type': 'string', 'optional': True, 'default': 'u_boot'},
-            'role': {'type': 'string', 'optional': True},
-        },
-        'additionalProperties': False,
-    }
-
-    @classmethod
-    def validate_parameters(cls, parameters):
-        super(cmd_deploy_linaro_image, cls).validate_parameters(parameters)
-        if 'hwpack' in parameters:
-            if 'rootfs' not in parameters:
-                raise ValueError('must specify rootfs when specifying hwpack')
-            if 'image' in parameters:
-                raise ValueError('cannot specify image and hwpack')
-        elif 'image' not in parameters:
-            raise ValueError('must specify image if not specifying a hwpack')
-
-    def run(self, hwpack=None, rootfs=None, image=None, rootfstype='ext3', bootloadertype='u_boot'):
-        self.client.deploy_linaro(
-            hwpack=hwpack, rootfs=rootfs, image=image, rootfstype=rootfstype, bootloadertype=bootloadertype)
-
-
-class cmd_deploy_linaro_android_image(BaseAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'boot': {'type': 'string'},
-            'system': {'type': 'string'},
-            'data': {'type': 'string'},
-            'rootfstype': {'type': 'string', 'optional': True, 'default': 'ext4'},
-        },
-        'additionalProperties': False,
-    }
-
-    def run(self, boot, system, data, rootfstype='ext4'):
-        self.client.deploy_linaro_android(boot, system, data, rootfstype)
-
-class cmd_deploy_linaro_kernel(BaseAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'kernel': {'type': 'string', 'optional': False},
-            'ramdisk': {'type': 'string', 'optional': True},
-            'dtb': {'type': 'string', 'optional': True},
-            'rootfs': {'type': 'string', 'optional': True},
-            'bootloader': {'type': 'string', 'optional': True},
-            'firmware': {'type': 'string', 'optional': True},
-            'rootfstype': {'type': 'string', 'optional': True},
-            'bootloadertype': {'type': 'string', 'optional': True, 'default': 'u_boot'},
-            'role': {'type': 'string', 'optional': True},
-            },
-        'additionalProperties': False,
-        }
-
-    @classmethod
-    def validate_parameters(cls, parameters):
-        super(cmd_deploy_linaro_kernel, cls).validate_parameters(parameters)
-        if 'kernel' not in parameters:
-            raise ValueError('must specify a kernel')
-
-    def run(self, kernel=None, ramdisk=None, dtb=None, rootfs=None, bootloader=None,
-            firmware=None, rootfstype='ext4', bootloadertype='u_boot'):
-        self.client.deploy_linaro_kernel(
-            kernel=kernel, ramdisk=ramdisk, dtb=dtb, rootfs=rootfs,
-            bootloader=bootloader, firmware=firmware, rootfstype=rootfstype, 
-            bootloadertype=bootloadertype)
-
-
-class cmd_dummy_deploy(BaseAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'target_type': {'type': 'string', 'enum': ['ubuntu', 'oe', 'android', 'fedora']},
-        },
-        'additionalProperties': False,
-    }
-
-    def run(self, target_type):
-        device = self.client.target_device
-        device.deployment_data = device.target_map[target_type]

=== removed file 'lava_dispatcher/actions/launch_control.py'
--- lava_dispatcher/actions/launch_control.py	2013-08-27 14:37:33 +0000
+++ lava_dispatcher/actions/launch_control.py	1970-01-01 00:00:00 +0000
@@ -1,309 +0,0 @@ 
-#!/usr/bin/python
-
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses>.
-
-import os
-import logging
-import tempfile
-import urlparse
-import xmlrpclib
-import simplejson
-from lava_tool.authtoken import AuthenticatingServerProxy, MemoryAuthBackend
-
-from linaro_dashboard_bundle.io import DocumentIO
-from linaro_dashboard_bundle.evolution import DocumentEvolution
-
-from lava_dispatcher.actions import BaseAction
-from lava_dispatcher.errors import OperationFailed
-from lava_dispatcher.test_data import create_attachment
-import lava_dispatcher.utils as utils
-
-
-class GatherResultsError(Exception):
-    def __init__(self, msg, bundles=None):
-        if not bundles:
-            bundles = []
-        super(GatherResultsError, self).__init__(msg)
-        self.bundles = bundles
-
-
-def _get_dashboard(server, token):
-    if not server.endswith("/"):
-        server = ''.join([server, "/"])
-
-    #add backward compatible for 'dashboard/'-end URL
-    #Fix it: it's going to be deleted after transition
-    if server.endswith("dashboard/"):
-        server = ''.join([server, "xml-rpc/"])
-        logging.warn("Please use whole endpoint URL not just end with 'dashboard/', "
-                     "'xml-rpc/' is added automatically now!!!")
-
-    parsed_server = urlparse.urlparse(server)
-    auth_backend = MemoryAuthBackend([])
-    if parsed_server.username:
-        if token:
-            userless_server = '%s://%s' % (
-                parsed_server.scheme, parsed_server.hostname)
-            if parsed_server.port:
-                userless_server += ':' + str(parsed_server.port)
-            userless_server += parsed_server.path
-            auth_backend = MemoryAuthBackend(
-                [(parsed_server.username, userless_server, token)])
-        else:
-            logging.warn(
-                "specifying a user without a token is unlikely to work")
-    else:
-        if token:
-            logging.warn(
-                "specifying a token without a user is probably useless")
-
-    srv = AuthenticatingServerProxy(
-        server, allow_none=True, use_datetime=True, auth_backend=auth_backend)
-    if server.endswith("xml-rpc/"):
-        logging.warn("Please use RPC2 endpoint instead, xml-rpc is deprecated!!!")
-        dashboard = srv
-    elif server.endswith("RPC2/"):
-        #include lava-server/RPC2/
-        dashboard = srv.dashboard
-    else:
-        logging.warn("The url seems not RPC2 or xml-rpc endpoints, please make sure it's a valid one!!!")
-        dashboard = srv.dashboard
-
-    logging.debug("server RPC endpoint URL: %s" % server)
-    return dashboard
-
-
-class cmd_submit_results(BaseAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'server': {'type': 'string'},
-            'stream': {'type': 'string'},
-            'result_disk': {'type': 'string', 'optional': True},
-            'token': {'type': 'string', 'optional': True},
-        },
-        'additionalProperties': False,
-    }
-
-    def _get_bundles(self, files):
-        bundles = []
-        errors = []
-        for fname in files:
-            if os.path.splitext(fname)[1] != ".bundle":
-                continue
-            content = None
-            try:
-                with open(fname, 'r') as f:
-                    doc = DocumentIO.load(f)[1]
-                DocumentEvolution.evolve_document(doc)
-                bundles.append(doc)
-            except ValueError:
-                msg = 'Error adding result bundle %s' % fname
-                errors.append(msg)
-                logging.exception(msg)
-                if content:
-                    logging.info('Adding bundle as attachment')
-                    attachment = create_attachment(fname, content)
-                    self.context.test_data.add_attachments([attachment])
-            except:
-                msg = 'Unknown error processing bundle' % fname
-                logging.exception(msg)
-                errors.append(msg)
-
-        if len(errors) > 0:
-            msg = ' '.join(errors)
-            raise GatherResultsError(msg, bundles)
-        return bundles
-
-    def _get_bundles_from_device(self, result_disk):
-        bundles = []
-        try:
-            result_path = self.client.retrieve_results(result_disk)
-            if result_path is not None:
-                d = tempfile.mkdtemp(dir=self.client.target_device.scratch_dir)
-                files = utils.extract_targz(result_path, d)
-                bundles = self._get_bundles(files)
-        except GatherResultsError:
-            raise
-        except:
-            msg = 'unable to retrieve results from target'
-            logging.exception(msg)
-            raise GatherResultsError(msg)
-        return bundles
-
-    def _get_results_from_host(self):
-        bundles = []
-        errors = []
-        try:
-            bundle_list = os.listdir(self.context.host_result_dir)
-            for bundle_name in bundle_list:
-                bundle = "%s/%s" % (self.context.host_result_dir, bundle_name)
-                content = None
-                try:
-                    with open(bundle) as f:
-                        doc = DocumentIO.load(f)[1]
-                    DocumentEvolution.evolve_document(doc)
-                    bundles.append(doc)
-                except ValueError:
-                    msg = 'Error adding host result bundle %s' % bundle
-                    errors.append(msg)
-                    logging.exception(msg)
-                    if content:
-                        logging.info('Adding bundle as attachment')
-                        attachment = create_attachment(bundle, content)
-                        self.context.test_data.add_attachments([attachment])
-        except:
-            msg = 'Error getting all results from host'
-            logging.exception(msg)
-            raise GatherResultsError(msg, bundles)
-
-        if len(errors) > 0:
-            msg = ' '.join(errors)
-            raise GatherResultsError(msg, bundles)
-
-        return bundles
-
-    def run(self, server, stream, result_disk="testrootfs", token=None):
-        main_bundle = self.collect_bundles(result_disk)
-        self.submit_bundle(main_bundle, server, stream, token)
-
-    def collect_bundles(self, server=None, stream=None, result_disk="testrootfs", token=None):
-        all_bundles = []
-        status = 'pass'
-        err_msg = ''
-        if self.context.any_device_bundles:
-            try:
-                bundles = self._get_bundles_from_device(result_disk)
-                all_bundles.extend(bundles)
-            except GatherResultsError as gre:
-                err_msg = gre.message
-                status = 'fail'
-                all_bundles.extend(gre.bundles)
-        if self.context.any_host_bundles:
-            try:
-                bundles = self._get_results_from_host()
-                all_bundles.extend(bundles)
-            except GatherResultsError as gre:
-                err_msg += ' ' + gre.message
-                status = 'fail'
-                all_bundles.extend(gre.bundles)
-
-        self.context.test_data.add_result('gather_results', status, err_msg)
-
-        main_bundle = self.combine_bundles(all_bundles)
-        return main_bundle
-
-    def combine_bundles(self, all_bundles):
-        if not all_bundles:
-            main_bundle = {
-                "test_runs": [],
-                "format": "Dashboard Bundle Format 1.6"
-            }
-        else:
-            main_bundle = all_bundles.pop(0)
-            test_runs = main_bundle['test_runs']
-            for bundle in all_bundles:
-                test_runs += bundle['test_runs']
-
-        attachments = self.client.get_test_data_attachments()
-        self.context.test_data.add_attachments(attachments)
-
-        main_bundle['test_runs'].append(self.context.test_data.get_test_run())
-
-        for test_run in main_bundle['test_runs']:
-            attributes = test_run.get('attributes', {})
-            attributes.update(self.context.test_data.get_metadata())
-            if "group_size" in attributes:
-                grp_size = attributes['group_size']
-                del attributes['group_size']
-                attributes['group_size'] = "%d" % grp_size
-            test_run['attributes'] = attributes
-
-        return main_bundle
-
-    def submit_bundle(self, main_bundle, server, stream, token):
-        dashboard = _get_dashboard(server, token)
-        json_bundle = DocumentIO.dumps(main_bundle)
-        job_name = self.context.job_data.get('job_name', "LAVA Results")
-        try:
-            result = dashboard.put_ex(json_bundle, job_name, stream)
-            print >> self.context.oob_file, 'dashboard-put-result:', result
-            self.context.output.write_named_data('result-bundle', result)
-            logging.info("Dashboard : %s" % result)
-        except xmlrpclib.Fault, err:
-            logging.warning("xmlrpclib.Fault occurred")
-            logging.warning("Fault code: %d" % err.faultCode)
-            logging.warning("Fault string: %s" % err.faultString)
-            raise OperationFailed("could not push to dashboard")
-
-    def submit_pending(self, bundle, server, stream, token, group_name):
-        """ Called from the dispatcher job when a MultiNode job requests to
-        submit results but the job does not have sub_id zero. The bundle is
-        cached in the dashboard until the coordinator allows sub_id zero to
-        call submit_group_list.
-        :param bundle: A single bundle which is part of the group
-        :param server: Where the bundle will be cached
-        :param token: token to allow access
-        :param group_name: MultiNode group unique ID
-        :raise: OperationFailed if the xmlrpclib call fails
-        """
-        dashboard = _get_dashboard(server, token)
-        json_bundle = simplejson.dumps(bundle)
-        try:
-            # make the put_pending xmlrpc call to store the bundle in the dashboard until the group is complete.
-            result = dashboard.put_pending(json_bundle, stream, group_name)
-            print >> self.context.oob_file, "dashboard-put-pending:", result
-            logging.info("Dashboard: bundle %s is pending in %s" % (result, group_name))
-        except xmlrpclib.Fault, err:
-            logging.warning("xmlrpclib.Fault occurred")
-            logging.warning("Fault code: %d" % err.faultCode)
-            logging.warning("Fault string: %s" % err.faultString)
-            raise OperationFailed("could not push pending bundle to dashboard")
-
-    def submit_group_list(self, bundle, server, stream, token, group_name):
-        """ Called from the dispatcher job when a MultiNode job has been
-         allowed by the coordinator to aggregate the group bundles as
-         all jobs in the group have registered bundle checksums with the coordinator.
-        :param bundle: The single bundle from this job to be added to the pending list.
-        :param server: Where the aggregated bundle will be submitted
-        :param stream: The bundle stream to use
-        :param token: The token to allow access
-        :param group_name: MultiNode group unique ID
-        :raise: OperationFailed if the xmlrpclib call fails
-        """
-        dashboard = _get_dashboard(server, token)
-        json_bundle = simplejson.dumps(bundle)
-        job_name = self.context.job_data.get("job_name", "LAVA Results")
-        try:
-            # make the put_group xmlrpc call to aggregate the bundles for the entire group & submit.
-            result = dashboard.put_group(json_bundle, job_name, stream, group_name)
-            print >> self.context.oob_file, "dashboard-group:", result, job_name
-            self.context.output.write_named_data('result-bundle', result)
-            logging.info("Dashboard: bundle %s is to be aggregated into %s" % (result, group_name))
-        except xmlrpclib.Fault, err:
-            logging.warning("xmlrpclib.Fault occurred")
-            logging.warning("Fault code: %d" % err.faultCode)
-            logging.warning("Fault string: %s" % err.faultString)
-            raise OperationFailed("could not push group bundle to dashboard")
-
-
-class cmd_submit_results_on_host(cmd_submit_results):
-    pass

=== removed file 'lava_dispatcher/actions/lava_android_test.py'
--- lava_dispatcher/actions/lava_android_test.py	2013-07-17 09:16:46 +0000
+++ lava_dispatcher/actions/lava_android_test.py	1970-01-01 00:00:00 +0000
@@ -1,229 +0,0 @@ 
-#!/usr/bin/python
-
-# Copyright (C) 2011-2012 Linaro Limited
-#
-# Author: Linaro Validation Team <linaro-dev@lists.linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses>.
-
-import os
-import logging
-from lava_dispatcher.actions import BaseAction
-from lava_dispatcher.errors import OperationFailed, TimeoutError
-from lava_dispatcher.utils import generate_bundle_file_name, DrainConsoleOutput
-
-
-class AndroidTestAction(BaseAction):
-
-    def check_lava_android_test_installed(self):
-        rc = os.system('which lava-android-test')
-        if rc != 0:
-            raise OperationFailed('lava-android-test has not been installed')
-
-
-class cmd_lava_android_test_run(AndroidTestAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'test_name': {'type': 'string'},
-            'option': {'type': 'string', 'optional': True},
-            'timeout': {'type': 'integer', 'optional': True},
-        },
-        'additionalProperties': False,
-    }
-
-    def test_name(self, test_name, option=None, timeout=-1):
-        return super(cmd_lava_android_test_run, self).test_name() + ' (%s)' % test_name
-
-    def run(self, test_name, option=None, timeout=-1):
-        #Make sure in test image now
-        self.check_lava_android_test_installed()
-        with self.client.android_tester_session() as session:
-            bundle_name = generate_bundle_file_name(test_name)
-            cmds = ["lava-android-test", 'run', test_name,
-                    '-s', session.dev_name,
-                    '-o', '%s/%s.bundle' % (self.context.host_result_dir,
-                                            bundle_name)]
-            if option is not None:
-                cmds.extend(['-O', option])
-            if timeout != -1:
-                cmds.insert(0, 'timeout')
-                cmds.insert(1, '%ss' % timeout)
-
-            t = DrainConsoleOutput(proc=session._connection, timeout=timeout)
-            t.start()
-            logging.info("Execute command on host: %s" % (' '.join(cmds)))
-            rc = self.context.run_command(cmds)
-            t.join()
-            if rc == 124:
-                raise TimeoutError(
-                    "The test case(%s) on device(%s) timed out" % (
-                    test_name, session.dev_name))
-            elif rc != 0:
-                raise OperationFailed(
-                    "Failed to run test case(%s) on device(%s) with return "
-                    "value: %s" % (test_name, session.dev_name, rc))
-
-
-class cmd_lava_android_test_run_custom(AndroidTestAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'commands': {'type': 'array', 'items': {'type': 'string'},
-                         'optional': True},
-            'command_file': {'type': 'string', 'optional': True},
-            'parser': {'type': 'string', 'optional': True},
-            'timeout': {'type': 'integer', 'optional': True},
-        },
-        'additionalProperties': False,
-    }
-
-    def test_name(self, commands=None, command_file=None, parser=None,
-                  timeout=-1):
-        if commands:
-            return '%s (commands=[%s])' % (
-                super(cmd_lava_android_test_run_custom, self).test_name(),
-                ','.join(commands))
-        elif command_file:
-            return '%s (command-file=%s)' % (
-                super(cmd_lava_android_test_run_custom, self).test_name(), command_file)
-
-    def run(self, commands=None, command_file=None, parser=None, timeout=-1):
-        """
-        :param commands: a list of commands
-        :param command_file: a file containing commands
-        :param parser:  The parser to use for the test
-        :param timeout: The timeout to apply.
-        """
-        #Make sure in test image now
-        self.check_lava_android_test_installed()
-        if commands or command_file:
-            with self.client.android_tester_session() as session:
-                bundle_name = generate_bundle_file_name('custom')
-                cmds = ["lava-android-test", 'run-custom']
-                if commands:
-                    for command in commands:
-                        cmds.extend(['-c', command])
-                elif command_file:
-                    cmds.extend(['-f', command_file])
-                else:
-                    raise OperationFailed(
-                        "Only one of the -c and -f option can be specified"
-                        " for lava_android_test_run_custom action")
-                cmds.extend(['-s', session.dev_name, '-o',
-                             '%s/%s.bundle' % (self.context.host_result_dir,
-                                               bundle_name)])
-                if parser is not None:
-                    cmds.extend(['-p', parser])
-
-                if timeout != -1:
-                    cmds.insert(0, 'timeout')
-                    cmds.insert(1, '%ss' % timeout)
-                logging.info("Execute command on host: %s" % (' '.join(cmds)))
-                rc = self.context.run_command(cmds)
-                if rc == 124:
-                    raise TimeoutError(
-                        "The test (%s) on device(%s) timed out." % (
-                        ' '.join(cmds), session.dev_name))
-                elif rc != 0:
-                    raise OperationFailed(
-                        "Failed to run test custom case[%s] on device(%s)"
-                        " with return value: %s" % (' '.join(cmds),
-                                                    session.dev_name, rc))
-
-
-class cmd_lava_android_test_run_monkeyrunner(AndroidTestAction):
-    """
-    This action is added to make doing the monkeyrunner script test more easily
-    from android build page. With this action, we only need to specify the url
-    of the repository where the monkeyrunner script are stored.
-    Then lava-android-test will run all the monkeyrunner scripts in that
-    repository, and help to gather all the png files genereated when run
-    """
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'url': {'type': 'string'},
-            'timeout': {'type': 'integer', 'optional': True},
-        },
-        'additionalProperties': False,
-    }
-
-    def test_name(self, url=None, timeout=-1):
-        return '%s (url=[%s])' % (super(cmd_lava_android_test_run_monkeyrunner, self).test_name(), url)
-
-    def run(self, url=None, timeout=-1):
-        #Make sure in test image now
-        self.check_lava_android_test_installed()
-        with self.client.android_tester_session() as session:
-            bundle_name = generate_bundle_file_name('monkeyrunner')
-            cmds = ["lava-android-test", 'run-monkeyrunner', url]
-            cmds.extend(['-s', session.dev_name, '-o',
-                         '%s/%s.bundle' % (self.context.host_result_dir,
-                                           bundle_name)])
-            if timeout != -1:
-                cmds.insert(0, 'timeout')
-                cmds.insert(1, '%ss' % timeout)
-
-            logging.info("Execute command on host: %s" % (' '.join(cmds)))
-            rc = self.context.run_command(cmds)
-            if rc == 124:
-                raise TimeoutError("Failed to run monkeyrunner test url[%s] on device(%s)" % (url, session.dev_name))
-            elif rc != 0:
-                raise OperationFailed(
-                    "Failed to run monkeyrunner test url[%s] on device(%s)"
-                    " with return value: %s" % (url, session.dev_name, rc))
-
-
-class cmd_lava_android_test_install(AndroidTestAction):
-    """
-    lava-test deployment to test image rootfs by chroot
-    """
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'tests': {'type': 'array', 'items': {'type': 'string'}},
-            'option': {'type': 'string', 'optional': True},
-            'timeout': {'type': 'integer', 'optional': True},
-        },
-        'additionalProperties': False,
-    }
-
-    def run(self, tests, option=None, timeout=2400):
-        self.check_lava_android_test_installed()
-        with self.client.android_tester_session() as session:
-            for test in tests:
-                cmds = ["lava-android-test", 'install',
-                        test,
-                        '-s', session.dev_name]
-                if option is not None:
-                    cmds.extend(['-o', option])
-                if timeout != -1:
-                    cmds.insert(0, 'timeout')
-                    cmds.insert(1, '%ss' % timeout)
-                logging.info("Execute command on host: %s" % (' '.join(cmds)))
-                rc = self.context.run_command(cmds)
-                if rc == 124:
-                    raise OperationFailed(
-                        "The installation of test case(%s)"
-                        " on device(%s) timed out" % (test, session.dev_name))
-                elif rc != 0:
-                    raise OperationFailed(
-                        "Failed to install test case(%s) on device(%s) with "
-                        "return value: %s" % (test, session.dev_name, rc))

=== removed file 'lava_dispatcher/actions/lava_test.py'
--- lava_dispatcher/actions/lava_test.py	2012-11-20 21:22:17 +0000
+++ lava_dispatcher/actions/lava_test.py	1970-01-01 00:00:00 +0000
@@ -1,211 +0,0 @@ 
-#!/usr/bin/python
-
-# Copyright (C) 2011-2012 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import logging
-
-from lava_dispatcher.actions import BaseAction
-from lava_dispatcher.errors import OperationFailed
-from lava_dispatcher.utils import generate_bundle_file_name
-
-
-def _install_lava_test(client, session):
-    #install bazaar in tester image
-    session.run('%s update' % client.aptget_cmd)
-    #Install necessary packages for build lava-test
-    cmd = ('%s -y --force-yes install '
-           'bzr usbutils python-apt python-setuptools '
-           'python-simplejson lsb-release python-keyring '
-           'python-pip' % client.aptget_cmd)
-    session.run(cmd, timeout=2400)
-
-    dispatcher_config = client.context.config
-
-    lava_test_deb = dispatcher_config.lava_test_deb
-    if lava_test_deb:
-        logging.debug("Installing %s with apt-get" % lava_test_deb)
-        session.run("%s -y --force-yes install %s"
-            % (client.aptget_cmd, lava_test_deb))
-    else:
-        lava_test_url = dispatcher_config.lava_test_url
-        logging.debug("Installing %s with pip" % lava_test_url)
-        session.run('pip install -e ' + lava_test_url)
-
-    #Test if lava-test installed
-    session.run('which lava-test', timeout=60)
-
-    # cleanup the lava-test - old results, cached files...
-    session.run('lava-test reset', timeout=60)
-
-
-class cmd_lava_test_run(BaseAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'test_name': {'type': 'string'},
-            'test_options': {'type': 'string', 'optional': True},
-            'timeout': {'type': 'integer', 'optional': True},
-            },
-        'additionalProperties': False,
-        }
-
-    def test_name(self, test_name, test_options="", timeout=-1):
-        return super(cmd_lava_test_run, self).test_name() + ' (%s)' % test_name
-
-    def run(self, test_name, test_options="", timeout=-1):
-        self.context.any_device_bundles = True
-        logging.info("Executing lava_test_run %s command" % test_name)
-        with self.client.tester_session() as session:
-            session.run('mkdir -p %s' % self.context.config.lava_result_dir)
-            session.export_display()
-            bundle_name = generate_bundle_file_name(test_name)
-            if test_options != "":
-                test_options = "-t '%s'" % test_options
-
-            cmd = ('lava-test run %s %s -o %s/%s.bundle' % (
-                    test_name, test_options,
-                    self.context.config.lava_result_dir, bundle_name))
-            try:
-                rc = session.run(cmd, timeout=timeout)
-            except:
-                logging.exception("session.run failed")
-                self.client.proc.sendcontrol('c')
-                try:
-                    session.run('true', timeout=20)
-                except:
-                    logging.exception("killing test failed, rebooting")
-                    self.client.boot_linaro_image()
-                raise
-            finally:
-                # try to make sure the test bundle is safely written to disk
-                session.run('sync', timeout=60)
-
-            if rc is None:
-                raise OperationFailed("test case getting return value failed")
-            elif rc != 0:
-                raise OperationFailed(
-                        "test case failed with return value: %s" % rc)
-
-
-class cmd_lava_test_install(BaseAction):
-    """
-    lava-test deployment to test image rootfs by chroot
-    """
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'tests': {'type': 'array', 'items': {'type': 'string'}},
-            'install_python': {
-                'type': 'array', 'items': {'type': 'string'}, 'optional': True
-                },
-            'install_deb': {
-                'type': 'array', 'items': {'type': 'string'}, 'optional': True
-                },
-            'register': {
-                'type': 'array', 'items': {'type': 'string'}, 'optional': True
-                },
-            'timeout': {'type': 'integer', 'optional': True},
-            'install_lava_test': {'type': 'boolean', 'optional': True, 'default': True}
-            },
-        'additionalProperties': False,
-        }
-
-    def run_command_with_test_result(self, session, command, test_result_name, timeout):
-        try:
-            session.run(command, timeout=timeout)
-        except OperationFailed as e:
-            logging.error("running %r failed" % command)
-            self.context.test_data.add_result(test_result_name, 'fail', str(e))
-        else:
-            self.context.test_data.add_result(test_result_name, 'pass')
-
-    def run(self, tests, install_python=None, install_deb=None, register=None,
-            timeout=2400, install_lava_test=True):
-        logging.info(
-            "Executing lava_test_install (%s) command" % ",".join(tests))
-
-        with self.client.reliable_session() as session:
-
-            lava_proxy = self.context.config.lava_proxy
-            if lava_proxy:
-                session.run("sh -c 'export http_proxy=%s'" % lava_proxy)
-
-            if install_lava_test:
-                _install_lava_test(self.client, session)
-
-            if install_python:
-                for module in install_python:
-                    self.run_command_with_test_result(
-                        session, "pip install -e " + module,
-                        'lava_test_install python (%s)' % module, timeout=60)
-
-            if install_deb:
-                debs = " ".join(install_deb)
-                self.run_command_with_test_result(
-                    session, "%s -y --force-yes install %s"
-                    % (self.client.aptget_cmd, debs),
-                    'lava_test_install deb (%s)' % debs, timeout=timeout)
-
-            if register:
-                for test_def_url in register:
-                    self.run_command_with_test_result(
-                        session, 'lava-test register-test  ' + test_def_url,
-                        'lava_test_install register (%s)' % test_def_url, timeout=60)
-
-            for test in tests:
-                self.run_command_with_test_result(
-                    session, 'lava-test install %s' % test,
-                    'lava_test_install (%s)' % test, timeout=timeout)
-
-            session.run('rm -rf lava-test', timeout=60)
-
-
-class cmd_add_apt_repository(BaseAction):
-    """
-    add apt repository to test image rootfs by chroot
-    arg could be 'deb uri distribution [component1] [component2][...]'
-    or ppa:<ppa_name>
-    """
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'arg': {
-                'type': 'array',
-                'items': {'type': 'string'},
-                }
-            },
-        'additionalProperties': False,
-        }
-
-    def run(self, arg):
-        with self.client.reliable_session() as session:
-
-            #install add-apt-repository
-            session.run('%s -y install python-software-properties'
-                % self.client.aptget_cmd)
-
-            #add ppa
-            for repository in arg:
-                session.run('add-apt-repository %s < /dev/null' % repository)
-            session.run('%s update' % self.client.aptget_cmd)

=== removed file 'lava_dispatcher/actions/lava_test_shell.py'
--- lava_dispatcher/actions/lava_test_shell.py	2013-09-12 07:08:25 +0000
+++ lava_dispatcher/actions/lava_test_shell.py	1970-01-01 00:00:00 +0000
@@ -1,722 +0,0 @@ 
-#!/usr/bin/python
-
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-# LAVA Test Shell implementation details
-# ======================================
-#
-# The idea of lava-test-shell is a YAML test definition is "compiled" into a
-# job that is run when the device under test boots and then the output of this
-# job is retrieved and analyzed and turned into a bundle of results.
-#
-# In practice, this means a hierarchy of directories and files is created
-# during test installation, a sub-hierarchy is created during execution to
-# hold the results and these latter sub-hierarchy whole lot is poked at on the
-# host during analysis.
-#
-# On Ubuntu and OpenEmbedded, the hierarchy is rooted at /lava.  / is mounted
-# read-only on Android, so there we root the hierarchy at /data/lava.  I'll
-# assume Ubuntu paths from here for simplicity.
-#
-# The directory tree that is created during installation looks like this:
-#
-# /lava/
-#    bin/                          This directory is put on the path when the
-#                                  test code is running -- these binaries can
-#                                  be viewed as a sort of device-side "API"
-#                                  for test authors.
-#       lava-test-runner           The job that runs the tests on boot.
-#       lava-test-shell            A helper to run a test suite.
-#       lava-test-case             A helper to record information about a test
-#                                  result.
-#       lava-test-case-attach      A helper to attach a file to a test result.
-#    tests/
-#       ${IDX}_${TEST_ID}/         One directory per test to be executed.
-#          uuid                    The "analyzer_assigned_uuid" of the
-#                                  test_run that is being generated.
-#          testdef.yml             The test definition.
-#          testdef_metadata        Metadata extracted from test definition.
-#          install.sh              The install steps.
-#          run.sh                  The run steps.
-#          [repos]                 The test definition can specify bzr or git
-#                                  repositories to clone into this directory.
-#
-# In addition, a file /etc/lava-test-runner.conf is created containing the
-# names of the directories in /lava/tests/ to execute.
-#
-# During execution, the following files are created:
-#
-# /lava/
-#    results/
-#       hwcontext/                 Each test_run in the bundle has the same
-#                                  hw & sw context info attached to it.
-#          cpuinfo.txt             Hardware info.
-#          meminfo.txt             Ditto.
-#       swcontext/
-#          build.txt               Software info.
-#          pkgs.txt                Ditto
-#       ${IDX}_${TEST_ID}-${TIMESTAMP}/
-#          testdef.yml
-#          testdef_metadata
-#          stdout.log
-#          return_code             The exit code of run.sh.
-#          analyzer_assigned_uuid
-#          attachments/
-#             install.sh
-#             run.sh
-#             ${FILENAME}          The attached data.
-#             ${FILENAME}.mimetype  The mime type of the attachment.
-#           attributes/
-#              ${ATTRNAME}         Content is value of attribute
-#          tags/
-#             ${TAGNAME}           Content of file is ignored.
-#          results/
-#             ${TEST_CASE_ID}/     Names the test result.
-#                result            (Optional)
-#                measurement
-#                units
-#                message
-#                timestamp
-#                duration
-#                attributes/
-#                   ${ATTRNAME}    Content is value of attribute
-#                attachments/      Contains attachments for test results.
-#                   ${FILENAME}           The attached data.
-#                   ${FILENAME}.mimetype  The mime type of the attachment.
-#
-# After the test run has completed, the /lava/results directory is pulled over
-# to the host and turned into a bundle for submission to the dashboard.
-
-from datetime import datetime
-from glob import glob
-import base64
-import logging
-import os
-import pexpect
-import pkg_resources
-import shutil
-import stat
-import StringIO
-import subprocess
-import tarfile
-import tempfile
-import time
-from uuid import uuid4
-
-import yaml
-
-from linaro_dashboard_bundle.io import DocumentIO
-
-import lava_dispatcher.lava_test_shell as lava_test_shell
-from lava_dispatcher.signals import SignalDirector
-from lava_dispatcher import utils
-
-from lava_dispatcher.actions import BaseAction
-from lava_dispatcher.device.target import Target
-from lava_dispatcher.downloader import download_image
-
-LAVA_TEST_DIR = '%s/../../lava_test_shell' % os.path.dirname(__file__)
-LAVA_MULTI_NODE_TEST_DIR = '%s/../../lava_test_shell/multi_node' % os.path.dirname(__file__)
-
-LAVA_GROUP_FILE = 'lava-group'
-LAVA_ROLE_FILE = 'lava-role'
-LAVA_SELF_FILE = 'lava-self'
-LAVA_SEND_FILE = 'lava-send'
-LAVA_SYNC_FILE = 'lava-sync'
-LAVA_WAIT_FILE = 'lava-wait'
-LAVA_WAIT_ALL_FILE = 'lava-wait-all'
-LAVA_MULTI_NODE_CACHE_FILE = '/tmp/lava_multi_node_cache.txt'
-
-Target.android_deployment_data['distro'] = 'android'
-Target.android_deployment_data['lava_test_sh_cmd'] = '/system/bin/mksh'
-Target.android_deployment_data['lava_test_dir'] = '/data/lava'
-Target.android_deployment_data['lava_test_results_part_attr'] = 'data_part_android_org'
-
-Target.ubuntu_deployment_data['distro'] = 'ubuntu'
-Target.ubuntu_deployment_data['lava_test_sh_cmd'] = '/bin/bash'
-Target.ubuntu_deployment_data['lava_test_dir'] = '/lava'
-Target.ubuntu_deployment_data['lava_test_results_part_attr'] = 'root_part'
-
-Target.oe_deployment_data['distro'] = 'oe'
-Target.oe_deployment_data['lava_test_sh_cmd'] = '/bin/sh'
-Target.oe_deployment_data['lava_test_dir'] = '/lava'
-Target.oe_deployment_data['lava_test_results_part_attr'] = 'root_part'
-
-Target.fedora_deployment_data['distro'] = 'fedora'
-Target.fedora_deployment_data['lava_test_sh_cmd'] = '/bin/bash'
-Target.fedora_deployment_data['lava_test_dir'] = '/lava'
-Target.fedora_deployment_data['lava_test_results_part_attr'] = 'root_part'
-
-# 755 file permissions
-XMOD = stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH | stat.S_IROTH
-
-
-def _get_testdef_git_repo(testdef_repo, tmpdir, revision):
-    cwd = os.getcwd()
-    gitdir = os.path.join(tmpdir, 'gittestrepo')
-    try:
-        subprocess.check_call(['git', 'clone', testdef_repo, gitdir])
-        if revision:
-            os.chdir(gitdir)
-            subprocess.check_call(['git', 'checkout', revision])
-        return gitdir
-    except Exception as e:
-        logging.error('Unable to get test definition from git\n' + str(e))
-    finally:
-        os.chdir(cwd)
-
-
-def _get_testdef_bzr_repo(testdef_repo, tmpdir, revision):
-    bzrdir = os.path.join(tmpdir, 'bzrtestrepo')
-    try:
-        # As per bzr revisionspec, '-1' is "The last revision in a
-        # branch".
-        if revision is None:
-            revision = '-1'
-
-        subprocess.check_call(
-            ['bzr', 'branch', '-r', revision, testdef_repo, bzrdir],
-            env={'BZR_HOME': '/dev/null', 'BZR_LOG': '/dev/null'})
-        return bzrdir
-    except Exception as e:
-        logging.error('Unable to get test definition from bzr\n' + str(e))
-
-
-def _get_testdef_tar_repo(testdef_repo, tmpdir):
-    """Extracts the provided encoded tar archive into tmpdir."""
-    tardir = os.path.join(tmpdir, 'tartestrepo')
-    temp_tar = os.path.join(tmpdir, "tar-repo.tar")
-
-    try:
-        if not os.path.isdir(tardir):
-            logging.info("Creating directory to extract the tar archive into.")
-            os.makedirs(tardir)
-
-        encoded_in = StringIO.StringIO(testdef_repo)
-        decoded_out = StringIO.StringIO()
-        base64.decode(encoded_in, decoded_out)
-
-        # The following two operations can also be done in memory
-        # using cStringIO.
-        # At the moment the tar file sent is not big, but that can change.
-        with open(temp_tar, "w") as write_tar:
-            write_tar.write(decoded_out.getvalue())
-
-        with tarfile.open(temp_tar) as tar:
-            tar.extractall(path=tardir)
-    except (OSError, tarfile.TarError) as ex:
-        logging.error("Error extracting the tar archive.\n" + str(ex))
-    finally:
-        # Remove the temporary created tar file after it has been extracted.
-        if os.path.isfile(temp_tar):
-            os.unlink(temp_tar)
-    return tardir
-
-
-def _get_testdef_info(testdef):
-    metadata = {'os': '', 'devices': '', 'environment': ''}
-    metadata['description'] = testdef['metadata'].get('description')
-    metadata['format'] = testdef['metadata'].get('format')
-    version = testdef['metadata'].get('version')
-    metadata['version'] = version and str(version) or version
-
-    # Convert list to comma separated string.
-    if testdef['metadata'].get('os'):
-        metadata['os'] = ','.join(testdef['metadata'].get('os'))
-
-    if testdef['metadata'].get('devices'):
-        metadata['devices'] = ','.join(testdef['metadata'].get('devices'))
-
-    if testdef['metadata'].get('environment'):
-        metadata['environment'] = ','.join(
-            testdef['metadata'].get('environment'))
-
-    return metadata
-
-
-class TestDefinitionLoader(object):
-    """
-    A TestDefinitionLoader knows how to load test definitions from the data
-    provided in the job file.
-    """
-
-    def __init__(self, context, tmpbase):
-        self.testdefs = []
-        self.context = context
-        self.tmpbase = tmpbase
-        self.testdefs_by_uuid = {}
-
-    def _append_testdef(self, testdef_obj):
-        testdef_obj.load_signal_handler()
-        self.testdefs.append(testdef_obj)
-        self.testdefs_by_uuid[testdef_obj.uuid] = testdef_obj
-
-    def load_from_url(self, url):
-        tmpdir = utils.mkdtemp(self.tmpbase)
-        testdef_file = download_image(url, self.context, tmpdir)
-        with open(testdef_file, 'r') as f:
-            logging.info('loading test definition')
-            testdef = yaml.safe_load(f)
-
-        idx = len(self.testdefs)
-
-        testdef_metadata = {'url': url, 'location': 'URL'}
-        testdef_metadata.update(_get_testdef_info(testdef))
-        self._append_testdef(URLTestDefinition(self.context, idx, testdef,
-                                               testdef_metadata))
-
-    def load_from_repo(self, testdef_repo):
-        tmpdir = utils.mkdtemp(self.tmpbase)
-        repo = None
-        info = None
-        if 'git-repo' in testdef_repo:
-            repo = _get_testdef_git_repo(
-                testdef_repo['git-repo'], tmpdir, testdef_repo.get('revision'))
-            name = os.path.splitext(os.path.basename(testdef_repo['git-repo']))[0]
-            info = _git_info(testdef_repo['git-repo'], repo, name)
-
-        if 'bzr-repo' in testdef_repo:
-            repo = _get_testdef_bzr_repo(
-                testdef_repo['bzr-repo'], tmpdir, testdef_repo.get('revision'))
-            name = testdef_repo['bzr-repo'].replace('lp:', '').split('/')[-1]
-            info = _bzr_info(testdef_repo['bzr-repo'], repo, name)
-
-        if 'tar-repo' in testdef_repo:
-            repo = _get_testdef_tar_repo(testdef_repo['tar-repo'], tmpdir)
-            # Default info structure, since we need something, but we have
-            # a tar file in this case.
-            info = {
-                "project_name": "Tar archived repository",
-                "branch_vcs": "tar",
-                "branch_revision": "0",
-                "branch_url": repo
-            }
-
-        if not repo or not info:
-            logging.debug("Unable to identify specified repository. %s" % testdef_repo)
-
-        test = testdef_repo.get('testdef', 'lavatest.yaml')
-        with open(os.path.join(repo, test), 'r') as f:
-            logging.info('loading test definition ...')
-            testdef = yaml.safe_load(f)
-
-        idx = len(self.testdefs)
-        self._append_testdef(
-            RepoTestDefinition(self.context, idx, testdef, repo, info))
-
-
-def _bzr_info(url, bzrdir, name):
-    cwd = os.getcwd()
-    try:
-        os.chdir('%s' % bzrdir)
-        revno = subprocess.check_output(['bzr', 'revno']).strip()
-        return {
-            'project_name': name,
-            'branch_vcs': 'bzr',
-            'branch_revision': revno,
-            'branch_url': url,
-        }
-    finally:
-        os.chdir(cwd)
-
-
-def _git_info(url, gitdir, name):
-    cwd = os.getcwd()
-    try:
-        os.chdir('%s' % gitdir)
-        commit_id = subprocess.check_output(
-            ['git', 'log', '-1', '--pretty=%H']).strip()
-        return {
-            'project_name': name,
-            'branch_vcs': 'git',
-            'branch_revision': commit_id,
-            'branch_url': url,
-        }
-    finally:
-        os.chdir(cwd)
-
-
-class URLTestDefinition(object):
-    """
-    A test definition that was loaded from a URL.
-    """
-
-    def __init__(self, context, idx, testdef, testdef_metadata):
-        self.context = context
-        self.testdef = testdef
-        self.testdef_metadata = testdef_metadata
-        self.idx = idx
-        self.test_run_id = '%s_%s' % (idx, self.testdef['metadata']['name'])
-        self.uuid = str(uuid4())
-        self._sw_sources = []
-        self.handler = None
-
-    def load_signal_handler(self):
-        hook_data = self.testdef.get('handler')
-        if not hook_data:
-            return
-        try:
-            handler_name = hook_data['handler-name']
-            logging.info("Loading handler named %s", handler_name)
-            handler_eps = list(
-                pkg_resources.iter_entry_points(
-                    'lava.signal_handlers', handler_name))
-            if len(handler_eps) == 0:
-                logging.error("No handler named %s found", handler_name)
-                return
-            elif len(handler_eps) > 1:
-                logging.warning(
-                    "Multiple handlers named %s found.  Picking one arbitrarily.",
-                    handler_name)
-            handler_ep = handler_eps[0]
-            logging.info("Loading handler from %s" % handler_ep.dist)
-            handler_cls = handler_ep.load()
-            self.handler = handler_cls(self, **hook_data.get('params', {}))
-        except Exception:
-            logging.exception("loading handler failed")
-
-    def _create_repos(self, testdir):
-        cwd = os.getcwd()
-        try:
-            os.chdir(testdir)
-
-            for repo in self.testdef['install'].get('bzr-repos', []):
-                logging.info("bzr branch %s" % repo)
-                # Pass non-existent BZR_HOME value, or otherwise bzr may
-                # have non-reproducible behavior because it may rely on
-                # bzr whoami value, presence of ssh keys, etc.
-                subprocess.check_call(['bzr', 'branch', repo],
-                                      env={'BZR_HOME': '/dev/null',
-                                           'BZR_LOG': '/dev/null'})
-                name = repo.replace('lp:', '').split('/')[-1]
-                self._sw_sources.append(_bzr_info(repo, name, name))
-
-            for repo in self.testdef['install'].get('git-repos', []):
-                logging.info("git clone %s" % repo)
-                subprocess.check_call(['git', 'clone', repo])
-                name = os.path.splitext(os.path.basename(repo))[0]
-                self._sw_sources.append(_git_info(repo, name, name))
-        finally:
-            os.chdir(cwd)
-
-    def _create_target_install(self, hostdir, targetdir):
-        with open('%s/install.sh' % hostdir, 'w') as f:
-            f.write('set -ex\n')
-            f.write('cd %s\n' % targetdir)
-
-            distro = self.context.client.target_device.deployment_data['distro']
-
-            # generic dependencies - must be named the same across all distros
-            # supported by the testdef
-            deps = self.testdef['install'].get('deps', [])
-
-            # distro-specific dependencies
-            deps = deps + self.testdef['install'].get('deps-' + distro, [])
-
-            if deps:
-                f.write('lava-install-packages ')
-                for dep in deps:
-                    f.write('%s ' % dep)
-                f.write('\n')
-
-            steps = self.testdef['install'].get('steps', [])
-            if steps:
-                for cmd in steps:
-                    f.write('%s\n' % cmd)
-
-    def copy_test(self, hostdir, targetdir):
-        """Copy the files needed to run this test to the device.
-
-        :param hostdir: The location on the device filesystem to copy too.
-        :param targetdir: The location `hostdir` will have when the device
-            boots.
-        """
-        utils.ensure_directory(hostdir)
-        with open('%s/testdef.yaml' % hostdir, 'w') as f:
-            f.write(yaml.dump(self.testdef))
-
-        with open('%s/uuid' % hostdir, 'w') as f:
-            f.write(self.uuid)
-
-        with open('%s/testdef_metadata' % hostdir, 'w') as f:
-            f.write(yaml.safe_dump(self.testdef_metadata))
-
-        if 'install' in self.testdef:
-            self._create_repos(hostdir)
-            self._create_target_install(hostdir, targetdir)
-
-        with open('%s/run.sh' % hostdir, 'w') as f:
-            f.write('set -e\n')
-            f.write('export TESTRUN_ID=%s\n' % self.test_run_id)
-            f.write('cd %s\n' % targetdir)
-            f.write('UUID=`cat uuid`\n')
-            f.write('echo "<LAVA_SIGNAL_STARTRUN $TESTRUN_ID $UUID>"\n')
-            f.write('#wait for an ack from the dispatcher\n')
-            f.write('read\n')
-            steps = self.testdef['run'].get('steps', [])
-            if steps:
-                for cmd in steps:
-                    f.write('%s\n' % cmd)
-            f.write('echo "<LAVA_SIGNAL_ENDRUN $TESTRUN_ID $UUID>"\n')
-            f.write('#wait for an ack from the dispatcher\n')
-            f.write('read\n')
-
-
-class RepoTestDefinition(URLTestDefinition):
-    """
-    A test definition that was loaded from a VCS repository.
-
-    The difference is that the files from the repository are also copied to
-    the device.
-    """
-
-    def __init__(self, context, idx, testdef, repo, info):
-        testdef_metadata = {}
-        testdef_metadata.update({'url': info['branch_url']})
-        testdef_metadata.update({'location': info['branch_vcs'].upper()})
-        testdef_metadata.update(_get_testdef_info(testdef))
-        testdef_metadata.update({'version': info['branch_revision']})
-
-        URLTestDefinition.__init__(self, context, idx, testdef,
-                                   testdef_metadata)
-        self.repo = repo
-        self._sw_sources.append(info)
-
-    def copy_test(self, hostdir, targetdir):
-        shutil.copytree(self.repo, hostdir, symlinks=True)
-        URLTestDefinition.copy_test(self, hostdir, targetdir)
-        logging.info('copied all test files')
-
-
-class cmd_lava_test_shell(BaseAction):
-
-    parameters_schema = {
-        'type': 'object',
-        'properties': {
-            'testdef_urls': {'type': 'array',
-                             'items': {'type': 'string'},
-                             'optional': True},
-            'testdef_repos': {'type': 'array',
-                              'items': {'type': 'object',
-                                        'properties':
-                                        {'git-repo': {'type': 'string',
-                                                'optional': True},
-                                        'bzr-repo': {'type': 'string',
-                                                'optional': True},
-                                        'tar-repo': {'type': 'string',
-                                                'optional': True},
-                                        'revision': {'type': 'string',
-                                                'optional': True},
-                                        'testdef': {'type': 'string',
-                                                'optional': True}
-                                         },
-                                        'additionalProperties': False},
-                              'optional': True
-                              },
-            'timeout': {'type': 'integer', 'optional': True},
-            'role': {'type': 'string', 'optional': True},
-        },
-        'additionalProperties': False,
-    }
-
-    def run(self, testdef_urls=None, testdef_repos=None, timeout=-1):
-        target = self.client.target_device
-
-        testdefs_by_uuid = self._configure_target(target, testdef_urls, testdef_repos)
-
-        signal_director = SignalDirector(self.client, testdefs_by_uuid, self.context)
-
-        with target.runner() as runner:
-            runner.wait_for_prompt(timeout)
-            if self.context.config.lava_proxy:
-                runner._connection.sendline(
-                    "export http_proxy=%s" % self.context.config.lava_proxy)
-            runner._connection.sendline(
-                "%s/bin/lava-test-runner" % target.deployment_data['lava_test_dir'])
-            start = time.time()
-            if timeout == -1:
-                timeout = runner._connection.timeout
-            initial_timeout = timeout
-            signal_director.set_connection(runner._connection)
-            while self._keep_running(runner, timeout, signal_director):
-                elapsed = time.time() - start
-                timeout = int(initial_timeout - elapsed)
-
-        self._bundle_results(target, signal_director, testdefs_by_uuid)
-
-    def _keep_running(self, runner, timeout, signal_director):
-        patterns = [
-            '<LAVA_TEST_RUNNER>: exiting',
-            pexpect.EOF,
-            pexpect.TIMEOUT,
-            '<LAVA_SIGNAL_(\S+) ([^>]+)>',
-            '<LAVA_MULTI_NODE> <LAVA_(\S+) ([^>]+)>',
-        ]
-
-        idx = runner._connection.expect(patterns, timeout=timeout)
-        if idx == 0:
-            logging.info('lava_test_shell seems to have completed')
-        elif idx == 1:
-            logging.warn('lava_test_shell connection dropped')
-        elif idx == 2:
-            logging.warn('lava_test_shell has timed out')
-        elif idx == 3:
-            name, params = runner._connection.match.groups()
-            logging.debug("Received signal <%s>" % name)
-            params = params.split()
-            try:
-                signal_director.signal(name, params)
-            except:
-                logging.exception("on_signal failed")
-            runner._connection.sendline('echo LAVA_ACK')
-            return True
-        elif idx == 4:
-            name, params = runner._connection.match.groups()
-            logging.debug("Received Multi_Node API <LAVA_%s>" % name)
-            params = params.split()
-            ret = False
-            try:
-                ret = signal_director.signal(name, params)
-            except:
-                logging.exception("on_signal(Multi_Node) failed")
-            return ret
-
-        return False
-
-    def _copy_runner(self, mntdir, target):
-        shell = target.deployment_data['lava_test_sh_cmd']
-
-        # Generic scripts
-        scripts_to_copy = glob(os.path.join(LAVA_TEST_DIR, 'lava-*'))
-
-        # Distro-specific scripts override the generic ones
-        distro = target.deployment_data['distro']
-        distro_support_dir = '%s/distro/%s' % (LAVA_TEST_DIR, distro)
-        for script in glob(os.path.join(distro_support_dir, 'lava-*')):
-            scripts_to_copy.append(script)
-
-        for fname in scripts_to_copy:
-            with open(fname, 'r') as fin:
-                foutname = os.path.basename(fname)
-                with open('%s/bin/%s' % (mntdir, foutname), 'w') as fout:
-                    fout.write("#!%s\n\n" % shell)
-                    fout.write(fin.read())
-                    os.fchmod(fout.fileno(), XMOD)
-
-    def _inject_multi_node_api(self, mntdir, target):
-        shell = target.deployment_data['lava_test_sh_cmd']
-
-        # Generic scripts
-        scripts_to_copy = glob(os.path.join(LAVA_MULTI_NODE_TEST_DIR, 'lava-*'))
-
-        for fname in scripts_to_copy:
-            with open(fname, 'r') as fin:
-                foutname = os.path.basename(fname)
-                with open('%s/bin/%s' % (mntdir, foutname), 'w') as fout:
-                    fout.write("#!%s\n\n" % shell)
-                    # Target-specific scripts (add ENV to the generic ones)
-                    if foutname == LAVA_GROUP_FILE:
-                        fout.write('LAVA_GROUP="\n')
-                        if 'roles' in self.context.group_data:
-                            for client_name in self.context.group_data['roles']:
-                                fout.write(r"\t%s\t%s\n" % (client_name, self.context.group_data['roles'][client_name]))
-                        else:
-                            logging.debug("group data MISSING")
-                        fout.write('"\n')
-                    elif foutname == LAVA_ROLE_FILE:
-                        fout.write("TARGET_ROLE='%s'\n" % self.context.test_data.metadata['role'])
-                    elif foutname == LAVA_SELF_FILE:
-                        fout.write("LAVA_HOSTNAME='%s'\n" % self.context.test_data.metadata['target.hostname'])
-                    else:
-                        fout.write("LAVA_TEST_BIN='%s/bin'\n" % target.deployment_data['lava_test_dir'])
-                        fout.write("LAVA_MULTI_NODE_CACHE='%s'\n" % LAVA_MULTI_NODE_CACHE_FILE)
-                        logging_level = self.context.test_data.metadata.get(\
-                            'logging_level', None)
-                        if logging_level and logging_level == 'DEBUG':
-                            fout.write("LAVA_MULTI_NODE_DEBUG='yes'\n")
-                    fout.write(fin.read())
-                    os.fchmod(fout.fileno(), XMOD)
-
-    def _mk_runner_dirs(self, mntdir):
-        utils.ensure_directory('%s/bin' % mntdir)
-        utils.ensure_directory_empty('%s/tests' % mntdir)
-        utils.ensure_directory_empty('%s/results' % mntdir)
-
-    def _configure_target(self, target, testdef_urls, testdef_repos):
-        ldir = target.deployment_data['lava_test_dir']
-
-        results_part = target.deployment_data['lava_test_results_part_attr']
-        results_part = getattr(target.config, results_part)
-
-        with target.file_system(results_part, 'lava') as d:
-            self._mk_runner_dirs(d)
-            self._copy_runner(d, target)
-            if 'target_group' in self.context.test_data.metadata:
-                self._inject_multi_node_api(d, target)
-
-            testdef_loader = TestDefinitionLoader(self.context, target.scratch_dir)
-
-            if testdef_urls:
-                for url in testdef_urls:
-                    testdef_loader.load_from_url(url)
-
-            if testdef_repos:
-                for repo in testdef_repos:
-                    testdef_loader.load_from_repo(repo)
-
-            tdirs = []
-            for testdef in testdef_loader.testdefs:
-                # android mount the partition under /system, while ubuntu
-                # mounts under /, so we have hdir for where it is on the
-                # host and tdir for how the target will see the path
-                hdir = '%s/tests/%s' % (d, testdef.test_run_id)
-                tdir = '%s/tests/%s' % (ldir, testdef.test_run_id)
-                testdef.copy_test(hdir, tdir)
-                tdirs.append(tdir)
-
-            with open('%s/lava-test-runner.conf' % d, 'w') as f:
-                for testdir in tdirs:
-                    f.write('%s\n' % testdir)
-
-        return testdef_loader.testdefs_by_uuid
-
-    def _bundle_results(self, target, signal_director, testdefs_by_uuid):
-        """ Pulls the results from the target device and builds a bundle
-        """
-        results_part = target.deployment_data['lava_test_results_part_attr']
-        results_part = getattr(target.config, results_part)
-        rdir = self.context.host_result_dir
-
-        with target.file_system(results_part, 'lava') as d:
-            results_dir = os.path.join(d, 'results')
-            bundle = lava_test_shell.get_bundle(results_dir, testdefs_by_uuid)
-            # lava/results must be empty, but we keep a copy named
-            # lava/results-XXXXXXXXXX for post-mortem analysis
-            timestamp = datetime.now().strftime("%s")
-            os.rename(results_dir, results_dir + '-' + timestamp)
-            os.mkdir(results_dir)
-
-        signal_director.postprocess_bundle(bundle)
-
-        (fd, name) = tempfile.mkstemp(
-            prefix='lava-test-shell', suffix='.bundle', dir=rdir)
-        with os.fdopen(fd, 'w') as f:
-            DocumentIO.dump(f, bundle)

=== removed directory 'lava_dispatcher/actions/lmp'
=== removed file 'lava_dispatcher/actions/lmp/__init__.py'
--- lava_dispatcher/actions/lmp/__init__.py	2013-09-04 10:19:58 +0000
+++ lava_dispatcher/actions/lmp/__init__.py	1970-01-01 00:00:00 +0000
@@ -1,1 +0,0 @@ 
-__author__ = 'dpigott'

=== removed file 'lava_dispatcher/actions/lmp/board.py'
--- lava_dispatcher/actions/lmp/board.py	2013-09-05 08:59:44 +0000
+++ lava_dispatcher/actions/lmp/board.py	1970-01-01 00:00:00 +0000
@@ -1,132 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Dave Pigott <dave.pigott@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import serial
-import json
-import logging
-from serial import (
-    serialutil
-)
-from lava_dispatcher.errors import (
-    CriticalError,
-)
-
-class LAVALmpDeviceSerial(object):
-    def __init__(self, serialno, board_type):
-        device_map = {
-            "sdmux": "0a",
-            "sata": "19",
-            "lsgpio": "09",
-            "hdmi": "0c",
-            "usb": "04"
-        }
-        self.serialno = "LL" + device_map[board_type] + serialno.zfill(12)
-        logging.debug("LMP Serial #: %s" % self.serialno)
-        self.lmpType = "org.linaro.lmp." + board_type
-        self.board_type = board_type
-        try:
-            self.port = serial.Serial("/dev/serial/by-id/usb-Linaro_Ltd_LavaLMP_" + self.serialno + "-if00", timeout=1)
-        except serial.serialutil.SerialException as e:
-            logging.error("LMP: Error opening {0:s}: {1:s}".format(self.serialno, e))
-            raise
-        self.START_FRAME = '\x02'
-        self.END_FRAME = '\x04'
-        self.send_frame('{"schema":"org.linaro.lmp.info"}')
-        message = self.get_response("board")
-        if message['serial'] != self.serialno:
-            raise CriticalError("Lmp %s not connected" % serial)
-        # With the sdmux, we must wait until the device has switched to the requested state. Not all Lmp boards provide
-        # the required state information in the report
-        # TODO: Fix firmware so that they all do
-        if board_type == "sdmux":
-            self.wait_for_confirmation = True
-        else:
-            self.wait_for_confirmation = False
-
-    def send_command(self, mode, selection):
-        message = '{"schema":"' + self.lmpType + '",' + \
-            '"serial":"' + self.serialno + '",' + \
-            '"modes":[{"name":"' + mode + '",' + \
-            '"option":"' + selection + '"}]}'
-
-        self.send_frame(message)
-
-        if self.wait_for_confirmation:
-            device_in_mode = False
-
-            while not device_in_mode:
-                try:
-                    response = self.get_frame()
-                except ValueError as e:
-                    logging.warning("LMP Frame read error: %s" % e)
-                    continue
-                else:
-                    for i in response["report"]:
-                        if i["name"] == "modes":
-                            modes = dict(i)
-                            for j in modes["modes"]:
-                                state = dict(j)
-                                if state["name"] == mode and state["mode"] == selection:
-                                    logging.debug("LMP %s: %s now in mode %s" % (self.board_type, mode, selection))
-                                    device_in_mode = True
-
-    def send_frame(self, command):
-        logging.debug("LMP: Sending %s" % command)
-        payload = self.START_FRAME + command + self.END_FRAME
-        self.port.write(payload)
-
-    def get_response(self, schema):
-        got_schema = False
-
-        result = self.get_frame()
-
-        while not got_schema:
-            if result['schema'] == "org.linaro.lmp." + schema:
-                got_schema = True
-            else:
-                result = self.get_frame()
-
-        return result
-
-    def get_frame(self):
-        char = self.port.read()
-
-        while char != self.START_FRAME:
-            char = self.port.read()
-
-        response = ""
-
-        while char != self.END_FRAME:
-            char = self.port.read()
-            if char != self.END_FRAME:
-                response += char
-
-        logging.debug("LMP: Got %s" % response)
-
-        return json.loads(response)
-
-    def close(self):
-        self.port.close()
-
-
-def lmp_send_command(serial, lmp_type, mode, state):
-    lmp = LAVALmpDeviceSerial(serial, lmp_type)
-    lmp.send_command(mode, state)
-    lmp.close()

=== removed file 'lava_dispatcher/actions/lmp/ethsata.py'
--- lava_dispatcher/actions/lmp/ethsata.py	2013-09-04 15:09:05 +0000
+++ lava_dispatcher/actions/lmp/ethsata.py	1970-01-01 00:00:00 +0000
@@ -1,29 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Dave Pigott <dave.pigott@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from lava_dispatcher.actions.lmp.board import lmp_send_command
-
-
-def disconnect(serial):
-    lmp_send_command(serial, "sata", "sata", "disconnect")
-
-
-def passthru(serial):
-    lmp_send_command(serial, "sata", "sata", "passthru")

=== removed file 'lava_dispatcher/actions/lmp/hdmi.py'
--- lava_dispatcher/actions/lmp/hdmi.py	2013-09-04 15:09:05 +0000
+++ lava_dispatcher/actions/lmp/hdmi.py	1970-01-01 00:00:00 +0000
@@ -1,33 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Dave Pigott <dave.pigott@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from lava_dispatcher.actions.lmp.board import lmp_send_command
-
-
-def disconnect(serial):
-    lmp_send_command(serial, "hdmi", "hdmi", "disconnect")
-
-
-def passthru(serial):
-    lmp_send_command(serial, "hdmi", "hdmi", "passthru")
-
-
-def fake(serial):
-    lmp_send_command(serial, "hdmi", "hdmi", "fake")

=== removed file 'lava_dispatcher/actions/lmp/lsgpio.py'
--- lava_dispatcher/actions/lmp/lsgpio.py	2013-09-04 15:09:05 +0000
+++ lava_dispatcher/actions/lmp/lsgpio.py	1970-01-01 00:00:00 +0000
@@ -1,45 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Dave Pigott <dave.pigott@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from lava_dispatcher.actions.lmp.board import lmp_send_command
-
-
-def audio_disconnect(serial):
-    lmp_send_command(serial, "lsgpio", "audio", "disconnect")
-
-
-def audio_passthru(serial):
-    lmp_send_command(serial, "lsgpio", "audio", "passthru")
-
-
-def a_dir_in(serial):
-    lmp_send_command(serial, "lsgpio", "a-dir", "in")
-
-
-def a_dir_out(serial):
-    lmp_send_command(serial, "lsgpio", "a-dir", "out")
-
-
-def b_dir_in(serial):
-    lmp_send_command(serial, "lsgpio", "b-dir", "in")
-
-
-def b_dir_out(serial):
-    lmp_send_command(serial, "lsgpio", "b-dir", "out")

=== removed file 'lava_dispatcher/actions/lmp/sdmux.py'
--- lava_dispatcher/actions/lmp/sdmux.py	2013-09-04 15:09:05 +0000
+++ lava_dispatcher/actions/lmp/sdmux.py	1970-01-01 00:00:00 +0000
@@ -1,53 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Dave Pigott <dave.pigott@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from lava_dispatcher.actions.lmp.board import lmp_send_command
-
-
-def dut_disconnect(serial):
-    lmp_send_command(serial, "sdmux", "dut", "disconnect")
-
-
-def dut_usda(serial):
-    lmp_send_command(serial, "sdmux", "dut", "uSDA")
-
-
-def dut_usdb(serial):
-    lmp_send_command(serial, "sdmux", "dut", "uSDB")
-
-
-def host_disconnect(serial):
-    lmp_send_command(serial, "sdmux", "host", "disconnect")
-
-
-def host_usda(serial):
-    lmp_send_command(serial, "sdmux", "host", "uSDA")
-
-
-def host_usdb(serial):
-    lmp_send_command(serial, "sdmux", "host", "uSDB")
-
-
-def dut_power_off(serial):
-    lmp_send_command(serial, "sdmux", "dut-power", "short-for-off")
-
-
-def dut_power_on(serial):
-    lmp_send_command(serial, "sdmux", "dut-power", "short-for-on")

=== removed file 'lava_dispatcher/actions/lmp/usb.py'
--- lava_dispatcher/actions/lmp/usb.py	2013-09-04 15:09:05 +0000
+++ lava_dispatcher/actions/lmp/usb.py	1970-01-01 00:00:00 +0000
@@ -1,33 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Dave Pigott <dave.pigott@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from lava_dispatcher.actions.lmp.board import lmp_send_command
-
-
-def device(serial):
-   lmp_send_command(serial, "usb", "usb", "device")
-
-
-def host(serial):
-    lmp_send_command(serial, "usb", "usb", "host")
-
-
-def disconnect(serial):
-    lmp_send_command(serial, "usb", "usb", "disconnect")

=== removed directory 'lava_dispatcher/client'
=== removed file 'lava_dispatcher/client/__init__.py'
--- lava_dispatcher/client/__init__.py	2011-11-24 03:00:54 +0000
+++ lava_dispatcher/client/__init__.py	1970-01-01 00:00:00 +0000
@@ -1,19 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.

=== removed file 'lava_dispatcher/client/base.py'
--- lava_dispatcher/client/base.py	2013-09-12 17:10:44 +0000
+++ lava_dispatcher/client/base.py	1970-01-01 00:00:00 +0000
@@ -1,636 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import commands
-import contextlib
-import logging
-import pexpect
-import sys
-import time
-import traceback
-
-import lava_dispatcher.utils as utils
-
-from lava_dispatcher.errors import (
-    NetworkError,
-    OperationFailed,
-    CriticalError,
-    ADBConnectError,
-)
-
-
-def wait_for_prompt(connection, prompt_pattern, timeout):
-    # One of the challenges we face is that kernel log messages can appear
-    # half way through a shell prompt.  So, if things are taking a while,
-    # we send a newline along to maybe provoke a new prompt.  We wait for
-    # half the timeout period and then wait for one tenth of the timeout
-    # 6 times (so we wait for 1.1 times the timeout period overall).
-    prompt_wait_count = 0
-    if timeout == -1:
-        timeout = connection.timeout
-    partial_timeout = timeout / 2.0
-    while True:
-        try:
-            connection.expect(prompt_pattern, timeout=partial_timeout)
-        except pexpect.TIMEOUT:
-            if prompt_wait_count < 6:
-                logging.warning('Sending newline in case of corruption.')
-                prompt_wait_count += 1
-                partial_timeout = timeout / 10
-                connection.sendline('')
-                continue
-            else:
-                raise
-        else:
-            break
-
-
-class CommandRunner(object):
-    """A convenient way to run a shell command and wait for a shell prompt.
-
-    The main interface is run().  Subclasses exist to (a) be more conveniently
-    constructed in some situations and (b) define higher level functions that
-    involve executing multiple commands.
-    """
-
-    def __init__(self, connection, prompt_str, prompt_str_includes_rc):
-        """
-
-        :param connection: A pexpect.spawn-like object.
-        :param prompt_str: The shell prompt to wait for.
-        :param prompt_str_includes_rc: Whether prompt_str includes a pattern
-            matching the return code of the command.
-        """
-        self._connection = connection
-        self._prompt_str = prompt_str
-        self._prompt_str_includes_rc = prompt_str_includes_rc
-        self.match_id = None
-        self.match = None
-
-    def wait_for_prompt(self, timeout=-1):
-        wait_for_prompt(self._connection, self._prompt_str, timeout)
-
-    def run(self, cmd, response=None, timeout=-1,
-            failok=False, wait_prompt=True):
-        """Run `cmd` and wait for a shell response.
-
-        :param cmd: The command to execute.
-        :param response: A pattern or sequences of patterns to pass to
-            .expect().
-        :param timeout: How long to wait for 'response' (if specified) and the
-            shell prompt, defaulting to forever.
-        :param failok: The command can fail or not, if it is set False and
-            command fail, an OperationFail exception will raise
-        :return: The exit value of the command, if wait_for_rc not explicitly
-            set to False during construction.
-        """
-        self._connection.empty_buffer()
-        self._connection.sendline(cmd)
-        start = time.time()
-        if response is not None:
-            self.match_id = self._connection.expect(response, timeout=timeout)
-            self.match = self._connection.match
-            if self.match == pexpect.TIMEOUT:
-                return None
-            # If a non-trivial timeout was specified, it is held to apply to
-            # the whole invocation, so now reduce the time we'll wait for the
-            # shell prompt.
-            if timeout > 0:
-                timeout -= time.time() - start
-                # But not too much; give at least a little time for the shell
-                # prompt to appear.
-                if timeout < 1:
-                    timeout = 1
-        else:
-            self.match_id = None
-            self.match = None
-
-        if wait_prompt:
-            self.wait_for_prompt(timeout)
-
-            if self._prompt_str_includes_rc:
-                rc = int(self._connection.match.group(1))
-                if rc != 0 and not failok:
-                    raise OperationFailed(
-                        "executing %r failed with code %s" % (cmd, rc))
-            else:
-                rc = None
-        else:
-            rc = None
-
-        return rc
-
-
-class NetworkCommandRunner(CommandRunner):
-    """A CommandRunner with some networking utility methods."""
-
-    def __init__(self, client, prompt_str, prompt_str_includes_rc):
-        CommandRunner.__init__(
-            self, client.proc, prompt_str,
-            prompt_str_includes_rc=prompt_str_includes_rc)
-        self._client = client
-
-    def get_target_ip(self):
-        logging.info("Waiting for network to come up")
-        try:
-            self.wait_network_up()
-        except NetworkError:
-            logging.exception("Unable to reach LAVA server")
-            raise
-
-        pattern1 = "<(\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?\.\d?\d?\d?)>"
-        cmd = ("ifconfig %s | grep 'inet addr' | awk -F: '{print $2}' |"
-               "awk '{print \"<\" $1 \">\"}'" %
-               self._client.config.default_network_interface)
-        self.run(
-            cmd, [pattern1, pexpect.EOF, pexpect.TIMEOUT], timeout=60)
-        if self.match_id != 0:
-            msg = "Unable to determine target image IP address"
-            logging.error(msg)
-            raise CriticalError(msg)
-
-        ip = self.match.group(1)
-        logging.debug("Target image IP is %s" % ip)
-        return ip
-
-    def _check_network_up(self):
-        """Internal function for checking network once."""
-        lava_server_ip = self._client.context.config.lava_server_ip
-        self.run(
-            "LC_ALL=C ping -W4 -c1 %s" % lava_server_ip,
-            ["1 received|1 packets received", "0 received|0 packets received", "Network is unreachable"],
-            timeout=60, failok=True)
-        if self.match_id == 0:
-            return True
-        else:
-            return False
-
-    def wait_network_up(self, timeout=300):
-        """Wait until the networking is working."""
-        now = time.time()
-        while time.time() < now + timeout:
-            if self._check_network_up():
-                return
-        raise NetworkError
-
-
-class TesterCommandRunner(CommandRunner):
-    """A CommandRunner to use when the board is booted into the test image.
-
-    See `LavaClient.tester_session`.
-    """
-
-    def __init__(self, client):
-        CommandRunner.__init__(
-            self,
-            client.proc,
-            client.target_device.deployment_data['TESTER_PS1_PATTERN'],
-            prompt_str_includes_rc=client.target_device.deployment_data[
-                'TESTER_PS1_INCLUDES_RC'])
-
-    def export_display(self):
-        self.run("su - linaro -c 'DISPLAY=:0 xhost local:'", failok=True)
-        self.run("export DISPLAY=:0")
-
-
-class AndroidTesterCommandRunner(NetworkCommandRunner):
-    """A CommandRunner to use when the board is booted into the android image.
-
-    See `LavaClient.android_tester_session`.
-    """
-
-    def __init__(self, client):
-        super(AndroidTesterCommandRunner, self).__init__(
-            client, client.target_device.deployment_data['TESTER_PS1_PATTERN'],
-            prompt_str_includes_rc=client.target_device.deployment_data['TESTER_PS1_INCLUDES_RC'])
-        self.dev_name = None
-
-    def connect(self):
-        if self._client.target_device.config.android_adb_over_tcp:
-            self._setup_adb_over_tcp()
-        elif self._client.target_device.config.android_adb_over_usb:
-            self._setup_adb_over_usb()
-        else:
-            raise CriticalError('ADB not configured for TCP or USB')
-
-    def _setup_adb_over_tcp(self):
-        logging.info("adb connect over default network interface")
-        self.dev_ip = self.get_default_nic_ip()
-        if self.dev_ip is None:
-            raise OperationFailed("failed to get board ip address")
-        try:
-            ## just disconnect the adb connection in case is remained
-            ## by last action or last job
-            ## that connection should be expired already
-            self.android_adb_over_tcp_disconnect()
-        except:
-            ## ignore all exception
-            ## this just in case of exception
-            pass
-        self.android_adb_over_tcp_connect()
-        self.wait_until_attached()
-
-    def _setup_adb_over_usb(self):
-        self.run('getprop ro.serialno', response=['[0-9A-Fa-f]{16}'])
-        self.dev_name = self.match.group(0)
-
-    def disconnect(self):
-        if self._client.target_device.config.android_adb_over_tcp:
-            self.android_adb_over_tcp_disconnect()
-
-    # adb cound be connected through network
-    def android_adb_over_tcp_connect(self):
-        dev_ip = self.dev_ip
-        pattern1 = "connected to (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})"
-        pattern2 = "already connected to (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})"
-        pattern3 = "unable to connect to"
-
-        adb_port = self._client.config.android_adb_port
-        cmd = "adb connect %s:%s" % (dev_ip, adb_port)
-        logging.info("Execute adb command on host: %s" % cmd)
-        adb_proc = pexpect.spawn(cmd, timeout=300, logfile=sys.stdout)
-        match_id = adb_proc.expect([pattern1, pattern2, pattern3, pexpect.EOF])
-        if match_id in [0, 1]:
-            self.dev_name = adb_proc.match.groups()[0]
-        else:
-            raise ADBConnectError(('Failed to connected to device with'
-                                   ' command:%s') % cmd)
-
-    def android_adb_over_tcp_disconnect(self):
-        dev_ip = self.dev_ip
-        adb_port = self._client.config.android_adb_port
-        cmd = "adb disconnect %s:%s" % (dev_ip, adb_port)
-        logging.info("Execute adb command on host: %s" % cmd)
-        pexpect.run(cmd, timeout=300, logfile=sys.stdout)
-
-    def get_default_nic_ip(self):
-        network_interface = self._client.get_android_adb_interface()
-        try:
-            ip = self._get_default_nic_ip_by_ifconfig(network_interface)
-        except:
-            logging.exception("_get_default_nic_ip_by_ifconfig failed")
-            return None
-
-        return ip
-
-    def _get_default_nic_ip_by_ifconfig(self, nic_name):
-        # Check network ip and setup adb connection
-        try:
-            self.wait_network_up()
-        except:
-            logging.warning(traceback.format_exc())
-            return None
-        ip_pattern = "%s: ip (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) mask" % nic_name
-        try:
-            self.run(
-                "ifconfig %s" % nic_name, [ip_pattern, pexpect.EOF], timeout=60)
-        except Exception as e:
-            raise NetworkError("ifconfig can not match ip pattern for %s:%s" % (nic_name, e))
-
-        if self.match_id == 0:
-            match_group = self.match.groups()
-            if len(match_group) > 0:
-                return match_group[0]
-        return None
-
-    def wait_until_attached(self):
-        for count in range(3):
-            if self.check_device_state():
-                return
-            time.sleep(3)
-
-        raise ADBConnectError(
-            "The android device(%s) isn't attached" % self._client.hostname)
-
-    def wait_home_screen(self):
-        timeout = self._client.config.android_home_screen_timeout
-        activity_pat = self._client.config.android_wait_for_home_screen_activity
-        #waiting for the home screen displayed
-        try:
-            self.run('logcat -s ActivityManager:I',
-                     response=[activity_pat],
-                     timeout=timeout, wait_prompt=False)
-        except pexpect.TIMEOUT:
-            msg = "The home screen was not displayed"
-            logging.critical(msg)
-            raise CriticalError(msg)
-        finally:
-            #send ctrl+c to exit the logcat command,
-            #and make the latter command can be run on the normal
-            #command line session, instead of the session of logcat command
-            self._connection.sendcontrol("c")
-            self.run('')
-
-    def check_device_state(self):
-        (rc, output) = commands.getstatusoutput('adb devices')
-        if rc != 0:
-            return False
-        expect_line = '%s\tdevice' % self.dev_name
-        for line in output.splitlines():
-            if line.strip() == expect_line:
-                return True
-        return False
-
-
-class LavaClient(object):
-    """
-    LavaClient manipulates the target board, bootup, reset, power off the
-    board, sends commands to board to execute.
-
-    The main interfaces to execute commands on the board are the \*_session()
-    methods.  These should be used as context managers, for example::
-
-        with client.tester_session() as session:
-            session.run('ls')
-
-    Each method makes sure the board is booted into the appropriate state
-    (tester image, chrooted into a partition, etc) and additionally
-    android_tester_session connects to the board via adb while in the 'with'
-    block.
-    """
-
-    def __init__(self, context, config):
-        self.context = context
-        self.config = config
-        self.hostname = config.hostname
-        self.proc = None
-        # used for apt-get in lava-test.py
-        self.aptget_cmd = "apt-get"
-        self.target_device = None
-
-    @contextlib.contextmanager
-    def tester_session(self):
-        """A session that can be used to run commands booted into the test
-        image."""
-        try:
-            self._in_test_shell()
-        except OperationFailed:
-            self.boot_linaro_image()
-        yield TesterCommandRunner(self)
-
-    @contextlib.contextmanager
-    def android_tester_session(self):
-        """A session that can be used to run commands booted into the android
-        test image.
-
-        Additionally, adb is connected while in the with block using this
-        manager.
-        """
-        try:
-            self._in_test_shell()
-        except OperationFailed:
-            self.boot_linaro_android_image()
-
-        session = AndroidTesterCommandRunner(self)
-        session.connect()
-
-        try:
-            yield session
-        finally:
-            session.disconnect()
-
-    def reliable_session(self):
-        """
-        Return a session rooted in the rootfs to be tested where networking is
-        guaranteed to work.
-        """
-        raise NotImplementedError(self.reliable_session)
-
-    def _in_test_shell(self):
-        """
-        Check that we are in a shell on the test image
-        """
-        if self.proc is None:
-            raise OperationFailed
-        self.proc.sendline("")
-        prompt = self.target_device.deployment_data['TESTER_PS1_PATTERN']
-        match_id = self.proc.expect([prompt, pexpect.TIMEOUT], timeout=10)
-        if match_id == 1:
-            raise OperationFailed
-
-    def setup_proxy(self, prompt_str):
-        lava_proxy = self.context.config.lava_proxy
-        if lava_proxy:
-            logging.info("Setting up http proxy")
-            # haven't included Android support yet
-            # Timeout is 30 seconds because of some underlying
-            # problem that causes these commands to sometimes
-            # take around 15-20 seconds.
-            self.proc.sendline("export http_proxy=%s" % lava_proxy)
-            self.proc.expect(prompt_str, timeout=30)
-            self.aptget_cmd = ' '.join([self.aptget_cmd,
-                                        "-o Acquire::http::proxy=%s" % lava_proxy])
-
-    def boot_master_image(self):
-        raise NotImplementedError(self.boot_master_image)
-
-    def _boot_linaro_image(self):
-        pass
-
-    def _boot_linaro_android_image(self):
-        pass
-
-    def boot_linaro_image(self):
-        """
-        Reboot the system to the test image
-        """
-        logging.info("Boot the test image")
-        boot_attempts = self.config.boot_retries
-        attempts = 0
-        in_linaro_image = False
-        while (attempts < boot_attempts) and (not in_linaro_image):
-            logging.info("Booting the test image. Attempt: %d" % (attempts + 1))
-            timeout = self.config.boot_linaro_timeout
-            TESTER_PS1_PATTERN = self.target_device.deployment_data[
-                'TESTER_PS1_PATTERN']
-
-            try:
-                self._boot_linaro_image()
-            except (OperationFailed, pexpect.TIMEOUT) as e:
-                msg = "Boot linaro image failed: %s" % e
-                logging.info(msg)
-                attempts += 1
-                continue
-
-            try:
-                wait_for_prompt(self.proc, TESTER_PS1_PATTERN, timeout=timeout)
-            except pexpect.TIMEOUT as e:
-                msg = "Timeout waiting for boot prompt: %s" % e
-                logging.info(msg)
-                attempts += 1
-                continue
-
-            self.setup_proxy(TESTER_PS1_PATTERN)
-            logging.info("System is in test image now")
-            in_linaro_image = True
-
-        if not in_linaro_image:
-            msg = "Could not get the test image booted properly"
-            logging.critical(msg)
-            raise CriticalError(msg)
-
-    def get_www_scratch_dir(self):
-        """ returns a temporary directory available for downloads that gets
-        deleted when the process exits """
-        return utils.mkdtemp(self.context.config.lava_image_tmpdir)
-
-    def get_test_data_attachments(self):
-        """returns attachments to go in the "lava_results" test run"""
-        return []
-
-    def retrieve_results(self, result_disk):
-        raise NotImplementedError(self.retrieve_results)
-
-    def finish(self):
-        pass
-
-    # Android stuff
-
-    def get_android_adb_interface(self):
-        return self.config.default_network_interface
-
-    def boot_linaro_android_image(self, adb_check=False):
-        """Reboot the system to the test android image."""
-        boot_attempts = self.config.boot_retries
-        attempts = 0
-        in_linaro_android_image = False
-
-        while (attempts < boot_attempts) and (not in_linaro_android_image):
-            logging.info("Booting the Android test image. Attempt: %d" %
-                         (attempts + 1))
-            TESTER_PS1_PATTERN = self.target_device.deployment_data[
-                'TESTER_PS1_PATTERN']
-            timeout = self.config.android_boot_prompt_timeout
-
-            try:
-                self._boot_linaro_android_image()
-            except (OperationFailed, pexpect.TIMEOUT) as e:
-                msg = "Failed to boot the Android test image: %s" % e
-                logging.info(msg)
-                attempts += 1
-                continue
-
-            try:
-                wait_for_prompt(self.proc, TESTER_PS1_PATTERN, timeout=timeout)
-            except pexpect.TIMEOUT:
-                msg = "Timeout waiting for boot prompt"
-                logging.info(msg)
-                attempts += 1
-                continue
-
-            #TODO: set up proxy
-
-            if not self.config.android_adb_over_usb:
-                try:
-                    self._disable_adb_over_usb()
-                except (OperationFailed, pexpect.TIMEOUT) as e:
-                    msg = "Failed to disable adb: %s" % e
-                    logging.info(msg)
-                    attempts += 1
-                    continue
-
-            if self.config.android_disable_suspend:
-                try:
-                    self._disable_suspend()
-                except (OperationFailed, pexpect.TIMEOUT) as e:
-                    msg = "Failed to disable suspend: %s" % e
-                    logging.info(msg)
-                    attempts += 1
-                    continue
-
-            if self.config.enable_network_after_boot_android:
-                time.sleep(1)
-                try:
-                    self._enable_network()
-                except (OperationFailed, pexpect.TIMEOUT) as e:
-                    msg = "Failed to enable network: %s" % e
-                    logging.info(msg)
-                    attempts += 1
-                    continue
-
-            if self.config.android_adb_over_tcp:
-                try:
-                    self._enable_adb_over_tcp()
-                except (OperationFailed, pexpect.TIMEOUT) as e:
-                    msg = "Failed to enable adp over tcp: %s" % e
-                    logging.info(msg)
-                    attempts += 1
-                    continue
-
-            in_linaro_android_image = True
-
-        if not in_linaro_android_image:
-            msg = "Could not get the Android test image booted properly"
-            logging.critical(msg)
-            raise CriticalError(msg)
-
-        #check if the adb connection can be created.
-        #by adb connect dev_ip command
-        if adb_check:
-            try:
-                session = AndroidTesterCommandRunner(self)
-                session.connect()
-            finally:
-                session.disconnect()
-
-    def _disable_suspend(self):
-        """ disable the suspend of images.
-        this needs wait unitl the home screen displayed"""
-        session = AndroidTesterCommandRunner(self)
-        try:
-            if self.config.android_wait_for_home_screen:
-                session.wait_home_screen()
-        except:
-            # ignore home screen exception if it is a health check job.
-            if not ('health_check' in self.context.job_data and self.context.job_data["health_check"] is True):
-                raise
-            else:
-                logging.info("Skip raising exception on the home screen has not displayed for health check jobs")
-        # When disablesuspend executes it waits for home screen unless
-        # --no-wait is passed.
-        session.run(
-            '/system/bin/disablesuspend.sh --no-wait',
-            timeout=self.config.disablesuspend_timeout)
-
-    def _enable_network(self):
-        session = AndroidTesterCommandRunner(self)
-        session.run("netcfg", timeout=20)
-        session.run("netcfg %s up" % self.config.default_network_interface, timeout=20)
-        session.run("netcfg %s dhcp" % self.config.default_network_interface, timeout=300)
-        session.run("ifconfig " + self.config.default_network_interface, timeout=20)
-
-    def _enable_adb_over_tcp(self):
-        logging.info("Enabling ADB over TCP")
-        session = AndroidTesterCommandRunner(self)
-        adb_port = self.config.android_adb_port
-        session.run('setprop service.adb.tcp.port %s' % adb_port)
-        session.run('stop adbd')
-        session.run('start adbd')
-        try:
-            session.connect()
-        finally:
-            session.disconnect()
-
-
-    def _disable_adb_over_usb(self):
-        logging.info("Disabling adb over USB")
-        session = AndroidTesterCommandRunner(self)
-        session.run('echo 0>/sys/class/android_usb/android0/enable')

=== removed file 'lava_dispatcher/client/lmc_utils.py'
--- lava_dispatcher/client/lmc_utils.py	2013-09-09 04:48:29 +0000
+++ lava_dispatcher/client/lmc_utils.py	1970-01-01 00:00:00 +0000
@@ -1,207 +0,0 @@ 
-from commands import getoutput, getstatusoutput
-import contextlib
-import logging
-import pexpect
-import re
-import os
-from tempfile import mkdtemp
-
-from lava_dispatcher.downloader import (
-    download_image,
-)
-from lava_dispatcher.utils import (
-    logging_system,
-)
-
-
-def generate_image(client, hwpack_url, rootfs_url, outdir, bootloader='u_boot', rootfstype=None,
-                   extra_boot_args=None, image_size=None):
-    """Generate image from a hwpack and rootfs url
-
-    :param hwpack_url: url of the Linaro hwpack to download
-    :param rootfs_url: url of the Linaro image to download
-    """
-    logging.info("preparing to deploy on %s" % client.config.hostname)
-    logging.info("  hwpack: %s" % hwpack_url)
-    logging.info("  rootfs: %s" % rootfs_url)
-
-    logging.info("Downloading the %s file" % hwpack_url)
-    hwpack_path = download_image(hwpack_url, client.context, outdir, decompress=False)
-
-    logging.info("Downloading the %s file" % rootfs_url)
-    rootfs_path = download_image(rootfs_url, client.context, outdir, decompress=False)
-
-    logging.info("linaro-media-create version information")
-    cmd = "sudo linaro-media-create -v"
-    rc, output = getstatusoutput(cmd)
-    metadata = client.context.test_data.get_metadata()
-    metadata['target.linaro-media-create-version'] = output
-    client.context.test_data.add_metadata(metadata)
-
-    image_file = os.path.join(outdir, "lava.img")
-
-    logging.info("client.device_type = %s" % client.config.device_type)
-
-    cmd = ("sudo flock /var/lock/lava-lmc.lck linaro-media-create --hwpack-force-yes --dev %s "
-           "--image-file %s --binary %s --hwpack %s --image-size 3G --bootloader %s" %
-           (client.config.lmc_dev_arg, image_file, rootfs_path, hwpack_path, bootloader))
-    if rootfstype is not None:
-        cmd += ' --rootfs ' + rootfstype
-    if image_size is not None:
-        cmd += ' --image-size ' + image_size
-    if extra_boot_args is not None:
-        cmd += ' --extra-boot-args "%s"' % extra_boot_args
-    logging.info("Executing the linaro-media-create command")
-    logging.info(cmd)
-
-    _run_linaro_media_create(client.context, cmd)
-    return image_file
-
-
-def generate_fastmodel_image(context, hwpack, rootfs, odir, bootloader='u_boot', size="2000M"):
-    cmd = ("flock /var/lock/lava-lmc.lck sudo linaro-media-create "
-           "--dev vexpress --output-directory %s --image-size %s "
-           "--hwpack %s --binary %s --hwpack-force-yes --bootloader %s" % (odir, size, hwpack, rootfs, bootloader))
-    logging.info("Generating fastmodel image with: %s" % cmd)
-    _run_linaro_media_create(context, cmd)
-
-
-def generate_android_image(context, device, boot, data, system, ofile, size="2000M"):
-    cmd = ("flock /var/lock/lava-lmc.lck linaro-android-media-create "
-           "--dev %s --image_file %s --image_size %s "
-           "--boot %s --userdata %s --system %s" % (device, ofile, size, boot, data, system))
-    logging.info("Generating android image with: %s" % cmd)
-    _run_linaro_media_create(context, cmd)
-
-
-def get_partition_offset(image, partno):
-    cmd = 'parted %s -m -s unit b print' % image
-    part_data = getoutput(cmd)
-    pattern = re.compile('%d:([0-9]+)B:' % partno)
-    for line in part_data.splitlines():
-        found = re.match(pattern, line)
-        if found:
-            return found.group(1)
-    return 0
-
-
-@contextlib.contextmanager
-def image_partition_mounted(image_file, partno):
-    mntdir = mkdtemp()
-    image = image_file
-    offset = get_partition_offset(image, partno)
-    mount_cmd = "sudo mount -o loop,offset=%s %s %s" % (offset, image, mntdir)
-    rc = logging_system(mount_cmd)
-    if rc != 0:
-        os.rmdir(mntdir)
-        raise RuntimeError("Unable to mount image %s at offset %s" % (
-            image, offset))
-    try:
-        yield mntdir
-    finally:
-        logging_system('sudo umount ' + mntdir)
-        logging_system('rm -rf ' + mntdir)
-
-
-def _run_linaro_media_create(context, cmd):
-    """Run linaro-media-create and accept licenses thrown up in the process.
-    """
-    proc = context.spawn(cmd)
-
-    # This code is a bit out of control.  It describes a state machine.  Each
-    # state has a name, a mapping patterns to wait for -> state to move to, a
-    # timeout for how long to wait for said pattern and optionally some input
-    # to send to l-m-c when you enter the step.
-
-    # The basic outline is this:
-
-    # We wait for l-m-c to actually start.  This has an enormous timeout,
-    # because 'cmd' starts with 'flock /var/lock/lava-lmc.lck' and when lots
-    # of jobs start at the same time, it can be a long time before the lock is
-    # acquired.
-
-    # Once its going, we watch for a couple of key phrases that suggets a
-    # license popup has appeared.  The next few states navigate through the
-    # dialogs and then accept the license.  The 'say-yes' state has extra fun
-    # stuff to try to move to a state where the "<Ok>" button is highlighted
-    # before pressing space (the acceptance dialogs are not consistent about
-    # whether <Ok> is the default or not!).
-
-    states = {
-        'waiting': {
-            'expectations': {
-                "linaro-hwpack-install": 'default',
-            },
-            'timeout': 86400,
-        },
-        'default': {
-            'expectations': {
-                "TI TSPA Software License Agreement": 'accept-tspa',
-                "SNOWBALL CLICK-WRAP": 'accept-snowball',
-                "LIMITED LICENSE AGREEMENT FOR APPLICATION  DEVELOPERS": 'accept-snowball',
-            },
-            'timeout': 3600,
-        },
-        'accept-tspa': {
-            'expectations': {"<Ok>": 'accept-tspa-1'},
-            'timeout': 1,
-        },
-        'accept-tspa-1': {
-            'input': "\t ",
-            'expectations': {
-                "Accept TI TSPA Software License Agreement": 'say-yes',
-            },
-            'timeout': 1,
-        },
-        'say-yes': {
-            'expectations': {
-                "  <(Yes|Ok)>": 'say-yes-tab',
-                "\\033\[41m<(Yes|Ok)>": 'say-yes-space',
-            },
-            'timeout': 1,
-        },
-        'say-yes-tab': {
-            'input': "\t",
-            'expectations': {
-                ".": 'say-yes',
-            },
-            'timeout': 1,
-        },
-        'say-yes-space': {
-            'input': " ",
-            'expectations': {
-                ".": 'default',
-            },
-            'timeout': 1,
-        },
-        'accept-snowball': {
-            'expectations': {"<Ok>": 'accept-snowball-1'},
-            'timeout': 1,
-        },
-        'accept-snowball-1': {
-            'input': "\t ",
-            'expectations': {
-                "Do you accept": 'say-yes',
-            },
-            'timeout': 1,
-        },
-    }
-
-    state = 'waiting'
-
-    while True:
-        state_data = states[state]
-        patterns = []
-        next_state_names = []
-        if 'input' in state_data:
-            proc.send(state_data['input'])
-        for pattern, next_state in state_data['expectations'].items():
-            patterns.append(pattern)
-            next_state_names.append(next_state)
-        patterns.append(pexpect.EOF)
-        next_state_names.append(None)
-        logging.debug('waiting for %r' % patterns)
-        match_id = proc.expect(patterns, timeout=state_data['timeout'])
-        state = next_state_names[match_id]
-        if state is None:
-            return

=== removed file 'lava_dispatcher/client/targetdevice.py'
--- lava_dispatcher/client/targetdevice.py	2013-08-30 22:15:05 +0000
+++ lava_dispatcher/client/targetdevice.py	1970-01-01 00:00:00 +0000
@@ -1,123 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import logging
-import os
-import time
-
-from lava_dispatcher.errors import (
-    CriticalError,
-)
-from lava_dispatcher.client.base import (
-    LavaClient,
-)
-from lava_dispatcher.device.target import (
-    get_target,
-)
-from lava_dispatcher.utils import (
-    mk_targz,
-)
-
-
-class TargetBasedClient(LavaClient):
-    """This is a wrapper around the lava_dispatcher.device.target class that
-    provides the additional functionality that's needed by lava-dispatcher
-    actions that depend on a LavaClient
-    """
-
-    def __init__(self, context, config):
-        super(TargetBasedClient, self).__init__(context, config)
-        self.target_device = get_target(context, config)
-
-    def deploy_linaro_android(self, boot, system, data, rootfstype='ext4'):
-        self.target_device.deploy_android(boot, system, data)
-
-    def deploy_linaro(self, hwpack=None, rootfs=None, image=None,
-                      rootfstype='ext3', bootloadertype='u_boot'):
-        if image is None:
-            if hwpack is None or rootfs is None:
-                raise CriticalError(
-                    "must specify both hwpack and rootfs when not specifying image")
-        elif hwpack is not None or rootfs is not None:
-            raise CriticalError(
-                "cannot specify hwpack or rootfs when specifying image")
-
-        if image is None:
-            self.target_device.deploy_linaro(hwpack, rootfs, bootloadertype)
-        else:
-            self.target_device.deploy_linaro_prebuilt(image, bootloadertype)
-
-    def deploy_linaro_kernel(self, kernel, ramdisk=None, dtb=None, rootfs=None, 
-                             bootloader=None, firmware=None, rootfstype='ext4', 
-                             bootloadertype='u_boot'):
-        self.target_device.deploy_linaro_kernel(kernel, ramdisk, dtb, rootfs, 
-                                                bootloader, firmware, rootfstype,
-                                                bootloadertype)
-
-    def _boot_linaro_image(self):
-        if self.proc:
-            logging.warning('device already powered on, powering off first')
-            self._power_off_device()
-        self.proc = self.target_device.power_on()
-
-    def _boot_linaro_android_image(self):
-        """Booting android or ubuntu style images don't differ much"""
-
-        logging.info('ensuring ADB port is ready')
-        adb_port = self.target_device.config.android_adb_port
-        while self.context.run_command("sh -c 'netstat -an | grep %s.*TIME_WAIT'" % adb_port) == 0:
-            logging.info("waiting for TIME_WAIT %s socket to finish" % adb_port)
-            time.sleep(3)
-
-        self._boot_linaro_image()
-
-    def reliable_session(self):
-        return self.tester_session()
-
-    def retrieve_results(self, result_disk):
-        self._power_off_device()
-
-        td = self.target_device
-        tar = os.path.join(td.scratch_dir, 'lava_results.tgz')
-        result_dir = self.context.config.lava_result_dir
-        with td.file_system(td.config.root_part, result_dir) as mnt:
-            mk_targz(tar, mnt)
-        return tar
-
-    def get_test_data_attachments(self):
-        '''returns attachments to go in the "lava_results" test run'''
-        a = super(TargetBasedClient, self).get_test_data_attachments()
-        a.extend(self.target_device.get_test_data_attachments())
-        return a
-
-    def finish(self):
-        self._power_off_device()
-
-    def _power_off_device(self):
-        """
-        Powers the associated device off by calling its power_off() method.
-
-        Can be called multiple times, but only the first will be effective, all
-        the others will be no-ops (unless the device is powered on again by
-        calling one of the _boot* methods).
-        """
-        if self.proc:
-            self.target_device.power_off(self.proc)
-            self.proc = None

=== removed file 'lava_dispatcher/config.py'
--- lava_dispatcher/config.py	2013-09-04 11:07:28 +0000
+++ lava_dispatcher/config.py	1970-01-01 00:00:00 +0000
@@ -1,325 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from ConfigParser import ConfigParser
-import os
-import StringIO
-import logging
-
-from configglue import parser, schema
-
-
-class DeviceSchema(schema.Schema):
-    android_binary_drivers = schema.StringOption()
-    cts_media_url = schema.StringOption()
-    boot_cmds = schema.StringOption(fatal=True)  # Can do better here
-    boot_cmds_android = schema.StringOption(fatal=True)  # And here
-    boot_cmds_oe = schema.StringOption(fatal=True)  # And here?
-    boot_cmds_tftp = schema.StringOption()
-    read_boot_cmds_from_image = schema.BoolOption(default=True)
-    boot_options = schema.ListOption()
-    boot_linaro_timeout = schema.IntOption(default=300)
-    boot_part = schema.IntOption(fatal=True)
-    boot_part_android_org = schema.IntOption()
-    boot_retries = schema.IntOption(default=3)
-    bootloader_prompt = schema.StringOption()
-    cache_part_android_org = schema.IntOption()
-    client_type = schema.StringOption()
-    connection_command = schema.StringOption(fatal=True)
-    data_part_android = schema.IntOption()
-    data_part_android_org = schema.IntOption()
-    default_network_interface = schema.StringOption()
-    disablesuspend_timeout = schema.IntOption(default=240)
-    device_type = schema.StringOption(fatal=True)
-    enable_network_after_boot_android = schema.BoolOption(default=True)
-    git_url_disablesuspend_sh = schema.StringOption()
-    hard_reset_command = schema.StringOption()
-    hostname = schema.StringOption()
-    image_boot_msg = schema.StringOption()
-    interrupt_boot_command = schema.StringOption()
-    interrupt_boot_prompt = schema.StringOption()
-    lmc_dev_arg = schema.StringOption()
-    master_str = schema.StringOption(default="root@master")
-    pre_connect_command = schema.StringOption()
-    qemu_binary = schema.StringOption(default="qemu-system-arm")
-    qemu_options = schema.StringOption()
-    qemu_drive_interface = schema.StringOption()
-    qemu_machine_type = schema.StringOption()
-    power_on_cmd = schema.StringOption()  # for sdmux
-    power_off_cmd = schema.StringOption()  # for sdmux
-    reset_port_command = schema.StringOption()
-    root_part = schema.IntOption()
-    sdcard_part_android = schema.IntOption()
-    sdcard_part_android_org = schema.IntOption()
-    soft_boot_cmd = schema.StringOption(default="reboot")
-    sys_part_android = schema.IntOption()
-    sys_part_android_org = schema.IntOption()
-    val = schema.StringOption()
-    sdcard_mountpoint_path = schema.StringOption(default="/storage/sdcard0")
-    possible_partitions_files = schema.ListOption(default=["init.partitions.rc",
-                                                           "fstab.partitions",
-                                                           "init.rc"])
-    boot_files = schema.ListOption(default=['boot.txt', 'uEnv.txt'])
-    boot_device = schema.IntOption(fatal=True)
-    testboot_offset = schema.IntOption(fatal=True)
-    # see doc/sdmux.rst for details
-    sdmux_id = schema.StringOption()
-    sdmux_usb_id = schema.StringOption()
-    sdmux_mount_retry_seconds = schema.IntOption(default=20)
-    sdmux_mount_wait_seconds = schema.IntOption(default=10)
-    sdmux_version = schema.StringOption(default="unknown")
-
-    simulator_version_command = schema.StringOption()
-    simulator_command = schema.StringOption()
-    simulator_axf_files = schema.ListOption()
-    simulator_kernel_files = schema.ListOption(default=None)
-    simulator_initrd_files = schema.ListOption(default=None)
-    simulator_dtb = schema.StringOption(default=None)
-    simulator_uefi = schema.StringOption(default=None)
-    simulator_boot_wrapper = schema.StringOption(default=None)
-
-    android_disable_suspend = schema.BoolOption(default=True)
-    android_adb_over_usb = schema.BoolOption(default=False)
-    android_adb_over_tcp = schema.BoolOption(default=True)
-    android_adb_port = schema.StringOption(default="5555")
-    android_wait_for_home_screen = schema.BoolOption(default=True)
-    android_wait_for_home_screen_activity = schema.StringOption(
-        default="Displayed com.android.launcher/com.android.launcher2.Launcher:")
-    android_home_screen_timeout = schema.IntOption(default=1800)
-    android_boot_prompt_timeout = schema.IntOption(default=1200)
-    android_orig_block_device = schema.StringOption(default="mmcblk0")
-    android_lava_block_device = schema.StringOption(default="mmcblk0")
-    partition_padding_string_org = schema.StringOption(default="p")
-    partition_padding_string_android = schema.StringOption(default="p")
-
-    arm_probe_binary = schema.StringOption(default='/usr/local/bin/arm-probe')
-    arm_probe_config = schema.StringOption(default='/usr/local/etc/arm-probe-config')
-    arm_probe_channels = schema.ListOption(default=['VDD_VCORE1'])
-
-    adb_command = schema.StringOption()
-    fastboot_command = schema.StringOption()
-    shared_working_directory = schema.StringOption(default=None)
-
-    uefi_image_filename = schema.StringOption(default=None)
-    vexpress_uefi_path = schema.StringOption(default=None)
-    vexpress_uefi_backup_path = schema.StringOption(default=None)
-    vexpress_stop_autoboot_prompt = schema.StringOption(
-        default='Press Enter to stop auto boot...')
-    vexpress_usb_mass_storage_device = schema.StringOption(default=None)
-
-    ecmeip = schema.StringOption()
-
-
-class OptionDescriptor(object):
-    def __init__(self, name):
-        self.name = name
-
-    def __get__(self, inst, cls=None):
-        return inst.cp.get('__main__', self.name)
-
-
-class DeviceConfig(object):
-
-    def __init__(self, cp):
-        self.cp = cp
-
-    for option in DeviceSchema().options():
-        locals()[option.name] = OptionDescriptor(option.name)
-
-
-class DispatcherSchema(schema.Schema):
-    lava_cachedir = schema.StringOption()
-    lava_cookies = schema.StringOption()
-    lava_image_tmpdir = schema.StringOption()
-    lava_image_url = schema.StringOption()
-    lava_proxy = schema.StringOption()
-    lava_result_dir = schema.StringOption()
-    lava_server_ip = schema.StringOption(fatal=True)
-    lava_test_deb = schema.StringOption()
-    lava_test_url = schema.StringOption()
-    logging_level = schema.IntOption()
-
-
-class DispatcherConfig(object):
-
-    def __init__(self, cp):
-        self.cp = cp
-
-    for option in DispatcherSchema().options():
-        locals()[option.name] = OptionDescriptor(option.name)
-
-
-user_config_path = os.path.expanduser("~/.config/lava-dispatcher")
-
-if "VIRTUAL_ENV" in os.environ:
-    system_config_path = os.path.join(os.environ["VIRTUAL_ENV"],
-                                      "etc/lava-dispatcher")
-else:
-    system_config_path = "/etc/lava-dispatcher"
-
-deprecated_system_config_path = "/etc/xdg/lava-dispatcher"
-
-default_config_path = os.path.join(os.path.dirname(__file__),
-                                   'default-config/lava-dispatcher')
-
-custom_config_path = None
-
-
-def search_path():
-    if custom_config_path:
-        return [
-            custom_config_path,
-            default_config_path,
-        ]
-    else:
-        return [
-            user_config_path,
-            system_config_path,
-            default_config_path,
-        ]
-
-
-def write_path():
-    """
-    Returns the configuration directories where configuration files should be
-    written to.
-
-    Returns an array with a list of directories. Client tools should then write
-    any configuration files to the first directory in that list that is
-    writable by the user.
-    """
-    if custom_config_path:
-        return [custom_config_path]
-    else:
-        # Since usually you need to run the dispatcher as root, but lava-tool
-        # as a regular user, we give preference to writing to the system
-        # configuration to avoid the user writing config file to ~user, and the
-        # dispatcher looking for them at ~root.
-        return [system_config_path, user_config_path]
-
-
-def _read_into(path, cp):
-    s = StringIO.StringIO()
-    s.write('[__main__]\n')
-    s.write(open(path).read())
-    s.seek(0)
-    cp.readfp(s)
-
-
-def _get_config(name, cp):
-    """Read a config file named name + '.conf'.
-
-    This checks and loads files from the source tree, site wide location and
-    home directory -- in that order, so home dir settings override site
-    settings which override source settings.
-    """
-    config_files = []
-    for directory in search_path():
-        path = os.path.join(directory, '%s.conf' % name)
-        if os.path.exists(path):
-            config_files.append(path)
-    if not config_files:
-        raise Exception("no config files named %r found" % (name + ".conf"))
-    config_files.reverse()
-    for path in config_files:
-        _read_into(path, cp)
-    return cp
-
-
-def get_config():
-    cp = parser.SchemaConfigParser(DispatcherSchema())
-    _get_config("lava-dispatcher", cp)
-    valid, report = cp.is_valid(report=True)
-    if not valid:
-        logging.warning("dispatcher config is not valid:\n    %s", '\n    '.join(report))
-    return DispatcherConfig(cp)
-
-
-def _hack_boot_options(scp):
-    """
-    Boot options are built by creating sections for each item in the
-    boot_options list. Those sections are managed by
-    lava_dispatcher.device.boot_options so we ignore here
-    """
-    scp.extra_sections = set(scp.get('__main__', 'boot_options'))
-
-
-def _hack_report(report):
-    """
-    ConfigGlue makes warning for somethings we don't want to warn about. In
-    particular, it will warn if a value isn't known to the config such as
-    in the case where you are using config variables or where you define
-    something like a boot_option for master like "boot_cmds_fdt"
-    """
-    scrubbed = []
-    ignores = [
-        'Configuration includes invalid options for section',
-    ]
-    for err in report:
-        for ignore in ignores:
-            if not err.startswith(ignore):
-                scrubbed.append(err)
-    return scrubbed
-
-
-def get_device_config(name):
-    # We read the device config once to get the device type, then we start
-    # again and read device-defaults, device-types/$device-type and
-    # devices/$device in that order.
-    initial_config = ConfigParser()
-    _get_config("devices/%s" % name, initial_config)
-
-    real_device_config = parser.SchemaConfigParser(DeviceSchema())
-    _get_config("device-defaults", real_device_config)
-    _get_config(
-        "device-types/%s" % initial_config.get('__main__', 'device_type'),
-        real_device_config)
-    _get_config("devices/%s" % name, real_device_config)
-    real_device_config.set("__main__", "hostname", name)
-    _hack_boot_options(real_device_config)
-    valid, report = real_device_config.is_valid(report=True)
-    if not valid:
-        report = _hack_report(report)
-        if len(report) > 0:
-            report = '\n    '.join(report)
-            logging.warning(
-                "Device config for %s is not valid:\n    %s", name, report)
-
-    return DeviceConfig(real_device_config)
-
-
-def get_devices():
-    devices = []
-    for config_dir in search_path():
-        devices_dir = os.path.join(config_dir, 'devices')
-        if os.path.isdir(devices_dir):
-            for d in os.listdir(devices_dir):
-                if d.endswith('.conf'):
-                    d = os.path.splitext(d)[0]
-                    devices.append(get_device_config(d))
-    return devices
-
-
-def get_config_file(config_file):
-    for config_dir in search_path():
-        config_file_path = os.path.join(config_dir, config_file)
-        if os.path.exists(config_file_path):
-            return config_file_path
-    return None

=== removed file 'lava_dispatcher/context.py'
--- lava_dispatcher/context.py	2013-09-05 23:08:13 +0000
+++ lava_dispatcher/context.py	1970-01-01 00:00:00 +0000
@@ -1,168 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import atexit
-import logging
-import os
-import subprocess
-import sys
-import tempfile
-
-from lava_dispatcher.config import get_device_config
-from lava_dispatcher.client.targetdevice import TargetBasedClient
-from lava_dispatcher.test_data import LavaTestData
-from lava_dispatcher.utils import (
-    logging_spawn,
-    rmtree,
-)
-
-
-class Flusher(object):
-    """
-    A Decorator for stream objects that makes all writes flush immediately
-    """
-    def __init__(self, stream):
-        self.stream = stream
-
-    def write(self, data):
-        self.stream.write(data)
-        self.stream.flush()
-
-    def __getattr__(self, name):
-        return getattr(self.stream, name)
-
-
-class Outputter(object):
-    """
-    Handles the problem of where to send the output. Always sends to stdout,
-    and if you pass an output directory it will also store the log in a file
-    called output.txt inside that directory.
-
-    During initialization, also sets up the logging subsystem to use the same
-    output.
-    """
-
-    def __init__(self, output_dir):
-
-        self._output_dir = output_dir
-        if output_dir:
-            output_txt = os.path.join(output_dir, 'output.txt')
-            output_pipe = subprocess.Popen(['tee', output_txt], stdin=subprocess.PIPE)
-            self.logfile_read = Flusher(output_pipe.stdin)
-        else:
-            self.logfile_read = Flusher(sys.stdout)
-
-        log_handler = logging.StreamHandler(self.logfile_read)
-        FORMAT = '<LAVA_DISPATCHER>%(asctime)s %(levelname)s: %(message)s'
-        DATEFMT = '%Y-%m-%d %I:%M:%S %p'
-        log_handler.setFormatter(
-            logging.Formatter(fmt=FORMAT, datefmt=DATEFMT))
-        del logging.root.handlers[:]
-        del logging.root.filters[:]
-        logging.root.addHandler(log_handler)
-
-    @property
-    def output_dir(self):
-        return self._output_dir
-
-    def write_named_data(self, name, data):
-        if self.output_dir is None:
-            return
-        with open(os.path.join(self.output_dir, name), 'w') as outf:
-            outf.write(data)
-
-
-class LavaContext(object):
-    def __init__(self, target, dispatcher_config, oob_file, job_data, output_dir):
-        self.config = dispatcher_config
-        self.job_data = job_data
-        self.output = Outputter(output_dir)
-        self.logfile_read = self.output.logfile_read
-        self.device_config = get_device_config(target)
-        self._client = TargetBasedClient(self, self.device_config)
-        self.test_data = LavaTestData()
-        self.oob_file = oob_file
-        self._host_result_dir = None
-        self.any_device_bundles = False
-
-    @property
-    def client(self):
-        return self._client
-
-    @property
-    def any_host_bundles(self):
-        return (self._host_result_dir is not None
-                and len(os.listdir(self._host_result_dir)) > 0)
-
-    @property
-    def host_result_dir(self):
-        if self._host_result_dir is None:
-            self._host_result_dir = tempfile.mkdtemp()
-            atexit.register(rmtree, self._host_result_dir)
-        return self._host_result_dir
-
-    def get_device_version(self):
-        return self.client.target_device.get_device_version()
-
-    def spawn(self, command, timeout=30):
-        proc = logging_spawn(command, timeout)
-        proc.logfile_read = self.logfile_read
-        return proc
-
-    def log(self, msg):
-        self.logfile_read.write(msg)
-        if not msg.endswith('\n'):
-            self.logfile_read.write('\n')
-
-    def run_command(self, command, failok=True):
-        """run command 'command' with output going to output-dir if specified"""
-        if isinstance(command, (str, unicode)):
-            command = ['nice', 'sh', '-c', command]
-        logging.debug("Executing on host : '%r'" % command)
-        output_args = {
-            'stdout': self.logfile_read,
-            'stderr': subprocess.STDOUT,
-        }
-        if failok:
-            rc = subprocess.call(command, **output_args)
-        else:
-            rc = subprocess.check_call(command, **output_args)
-        return rc
-
-    def run_command_get_output(self, command):
-        """run command 'command' then return the command output"""
-        if isinstance(command, (str, unicode)):
-            command = ['sh', '-c', command]
-        logging.debug("Executing on host : '%r'" % command)
-        return subprocess.check_output(command) 
-
-    def finish(self):
-        self.client.finish()
-
-    def assign_transport(self, transport):
-        self.transport = transport
-
-    def assign_group_data(self, group_data):
-        """
-        :param group_data: Arbitrary data related to the
-        group configuration, passed in via the GroupDispatcher
-        Used by lava-group
-        """
-        self.group_data = group_data

=== removed directory 'lava_dispatcher/default-config'
=== removed directory 'lava_dispatcher/default-config/lava-dispatcher'
=== removed file 'lava_dispatcher/default-config/lava-dispatcher/README'
--- lava_dispatcher/default-config/lava-dispatcher/README	2013-01-16 22:52:58 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/README	1970-01-01 00:00:00 +0000
@@ -1,38 +0,0 @@ 
-Configuration files for lava-dispatcher
-=======================================
-
-lava-dispatcher looks for files in:
-
- * Alongside the installation/source tree for the default values
-   (i.e. this directory).
-
- * /srv/lava/<instance>/etc/lava-dispatcher
-
-Each config directory can contain two files and two directories:
-
- * lava-dispatcher.conf
-
-   This file defines global settings of the dispatcher.  You will
-   almost certainly need to customize LAVA_SERVER_IP and LAVA_PROXY
-   for your install. LAVA_PROXY could be empty if no proxy server
-   available.
-
- * device-defaults.conf
-
-   This file defines default values for all devices.  You probably
-   won't need to customize it.
-
- * device-types/
-
-   This directory contains a config file for each device type.  You
-   probably won't need to customize the settings for device types that
-   are already supported by lava-dispatcher, but if you are working on
-   supporting a new class of device, you will need to add a file here.
-
- * devices/
-
-   This directory contains a file per device that can be targeted by
-   lava-dispatcher.  For the most part this file just needs to contain
-   a line "device_type = <device type>", although other settings can
-   be included here.  You will definitely need to tell lava-dispatcher
-   about the devices you have!

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf	2013-09-04 11:07:28 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-defaults.conf	1970-01-01 00:00:00 +0000
@@ -1,141 +0,0 @@ 
-# The default device settings.
-
-# All device settings default to these values unless they are
-# overwritten by the specific device type file
-# (device-types/${TYPE}.conf) or the specific device file
-# (devices/${DEVICE}.conf).
-
-# The client_type.  This determines how we connect, deploy to and
-# control the booting of the device.  'master', the default, means a
-# board that boots into a known good image by default but can be
-# manipulated to boot from different boot and rootfs filesystems.
-# 'qemu' is the other possible value at this time.
-client_type = master
-
-connection_command = conmux-console %(hostname)s
-
-pre_connect_command =
-
-# The bootloader commands to boot the device into the test image (we
-# assume that the device boots into the master image without bootloader
-# intervention).
-#
-# XXX should be called # boot_test_image_commands ?
-boot_cmds =
-
-# The bootloader commands to boot the device into an android-based test
-# image.
-#
-# XXX should be called # boot_android_test_image_commands ?
-boot_cmds_android =
-
-# The bootloader commands to boot the device into an OpenEmbedded test image.
-#
-# XXX should be called # boot_oe_test_image_commands ?
-boot_cmds_oe =
-
-# The device type.  Settings in device-types/${DEVICE_TYPE}.conf
-# override settings in this file, but are overridden by the
-# devices/${DEVICE}.conf file.
-device_type =
-
-# The network interface that comes up by default
-default_network_interface = eth0
-
-# boot partition number, counting from 1
-#
-# This is used to divide up the image produced by linaro-media-create
-# into sections to write onto the device.
-boot_part = 1
-
-# root partition number, counting from 1
-#
-# This is used to divide up the image produced by linaro-media-create
-# into sections to write onto the device.
-root_part = 2
-
-# Original linaro-android-media-create generated Android system SD card layout
-#
-# This is used to regenerate init script when Android bootup for partitions are
-# changed if deployed in LAVA test image
-#
-# boot partition number in original Android, counting from 1
-boot_part_android_org = 1
-# sys partition number in original Android, counting from 1
-sys_part_android_org = 2
-# cache partition number in original Android, counting from 1
-cache_part_android_org = 3
-# data partition number in original Android, counting from 1
-data_part_android_org = 5
-# sdcard partition number in original Android, counting from 1
-sdcard_part_android_org = 6
-
-# Android LAVA test image SD card layout
-#
-# This is used to regenerate init script when Android bootup for partitions are
-# changed if deployed in LAVA test image
-#
-# sys partition number in LAVA test image, counting from 1
-sys_part_android = 5
-# sdcard partition number in LAVA test image, counting from 1
-sdcard_part_android = 7
-# data partition number in LAVA test image, counting from 1
-data_part_android = 6
-
-# Master image recognization string
-# This is only used to detect that the master image has booted, it is
-# overwritten after that.
-# MASTER_STR = root@master
-
-# The string to look for to interrupt the boot process
-interrupt_boot_prompt = Hit any key to stop autoboot
-
-# The string command to stop the normal boot process
-interrupt_boot_command = ""
-
-# The string to look for to know that the boot process has begun
-image_boot_msg = Linux version
-
-# The character the boot loader uses as a prompt on this board.
-bootloader_prompt = #
-
-# The argument to pass to --dev when invoking linaro-media-create
-# Defaults to device_type because that's what was used before this
-# option was introduced.
-lmc_dev_arg = %(device_type)s
-
-# The value to pass to qemu-system-arm's -M option.
-qemu_machine_type = %(device_type)s
-
-# QEMU drive interface.
-qemu_drive_interface = sd
-
-# This is for android build where the network is not up by default. 1 or 0
-enable_network_after_boot_android = 1
-
-# the url of disablesusepend.sh script in android git repository
-git_url_disablesuspend_sh = "http://android.git.linaro.org/gitweb?p=device/linaro/common.git;a=blob_plain;f=disablesuspend.sh;hb=refs/heads/linaro-ics"
-
-# the url of the media files for CTS test
-cts_media_url = http://testdata.validation.linaro.org/cts/media.tgz
-
-# how long the disablesuspend script should take to complete
-#disablesuspend_timeout = 240
-
-# This is the actual boot device for test images. Can be overridden in device
-# specific config file.
-boot_device = 0
-
-# The test boot offset that should be added to the boot partition for data
-# obtained from boot.txt or uEnv.txt and others. Can be overridden in device
-# specific config file.
-testboot_offset = 2
-
-# How many times the dispatcher should try to reboot master and test images before failing
-boot_retries = 3
-
-# For an sdmux enabled device, maximum amount of time to wait for the /dev/sdX to appear after device has been switched
-# to host mode
-sdmux_mount_retry_seconds = 20
-# How long to wait after the /dev/sdX entry has turned up before trying to unmount anything attached to it
-sdmux_mount_wait_seconds = 10

=== removed directory 'lava_dispatcher/default-config/lava-dispatcher/device-types'
=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/aa9.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/aa9.conf	2013-09-03 21:46:48 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/aa9.conf	1970-01-01 00:00:00 +0000
@@ -1,25 +0,0 @@ 
-client_type = bootloader
-
-boot_cmds = 
-    setenv bootcmd "'fatload mmc 0:3 0x40000000 uImage; fatload mmc 0:3 0x41100000 uInitrd; fatload mmc 0:3 0x41000000 board.dtb; bootm 0x40000000 0x41100000 0x41000000'",
-    setenv bootargs "'console=ttyS0,115200n8 root=LABEL=testrootfs rootwait ro'",
-    boot
-
-boot_cmds_android = 
-    setenv bootcmd "'fatload mmc 0:3 0x40000000 uImage; fatload mmc 0:3 0x41100000 uInitrd; fatload mmc 0:3 0x41000000 mb8ac0300eb.dtb; bootm 0x40000000 0x41100000 0x41000000'",
-    setenv bootargs "'console=ttyS0,115200n8 init=/init rootwait rw androidboot.hardware=fujitsusemiconductormb8ac0300-e'",
-    boot
-
-possible_partitions_files =
-    init.partitions.rc
-    fstab.partitions
-    init.rc
-    fstab.fujitsusemiconductormb8ac0300-e
-
-bootloader_prompt = u-boot
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/arndale.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/arndale.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/arndale.conf	1970-01-01 00:00:00 +0000
@@ -1,59 +0,0 @@ 
-client_type = bootloader
-boot_part = 2
-root_part = 3
-testboot_offset = 3
-
-boot_cmds = mmc rescan,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:5 0x40007000 uImage; fatload mmc 0:5 0x42000000 uInitrd; fatload mmc 0:5 0x41f00000 board.dtb; bootm 0x40007000 0x42000000 0x41f00000'",
-    setenv bootargs "'console=ttySAC2,115200n8  root=LABEL=testrootfs rootwait ro'",
-    boot
-
-# Original linaro-android-media-create generated Android system SD card layout
-boot_part_android_org = 2
-sys_part_android_org = 3
-cache_part_android_org = 5
-data_part_android_org = 6
-sdcard_part_android_org = 7
-
-# Android LAVA test image SD card layout
-sys_part_android = 6
-sdcard_part_android = 7
-data_part_android = 7
-
-android_orig_block_device = mmcblk1
-android_lava_block_device = mmcblk1
-
-boot_cmds_android = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:5 0x40007000 uImage; fatload mmc 0:5 0x41000000 uInitrd; fatload mmc 0:5 0x41f00000 board.dtb; bootm 0x40007000 0x41000000 0x41f00000'",
-    setenv bootargs "'console=tty0 console=ttySAC2,115200n8 androidboot.hardware=exynos5250-arndale rootwait ro rootdelay=3 init=/init androidboot.console=ttySAC2 console=ttySAC2'",
-    boot
-
-boot_cmds_oe = mmc rescan,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:5 0x40007000 uImage; fatload mmc 0:5 0x41f00000 board.dtb; bootm 0x40007000 - 0x41f00000'",
-    setenv bootargs "'console=ttySAC2,115200n8  root=/dev/mmcblk1p6 rootwait ro'",
-    boot
-
-boot_cmds_tftp =
-    setenv autoload no,
-    setenv pxefile_addr_r "'0x50000000'",
-    setenv kernel_addr_r "'0x40007000'",
-    setenv initrd_addr_r "'0x42000000'",
-    setenv fdt_addr_r "'0x41f00000'",
-    setenv loadkernel "'tftp ${kernel_addr_r} ${lava_kernel}'",
-    setenv loadinitrd "'tftp ${initrd_addr_r} ${lava_ramdisk}; setenv initrd_size ${filesize}'",
-    setenv loadfdt "'tftp ${fdt_addr_r} ${lava_dtb}'",
-    setenv bootargs "'root=/dev/ram0 console=ttySAC2,115200n8 init --no-log ip=:::::eth0:dhcp'",
-    setenv bootcmd "'usb start; dhcp; setenv serverip ${lava_server_ip}; run loadkernel; run loadinitrd; run loadfdt; bootm ${kernel_addr_r} ${initrd_addr_r} ${fdt_addr_r}'",
-    boot
-
-bootloader_prompt = ARNDALE5250
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds
-

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/beagle-xm.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/beagle-xm.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/beagle-xm.conf	1970-01-01 00:00:00 +0000
@@ -1,31 +0,0 @@ 
-client_type = bootloader
-boot_cmds = mmc init, 
-	mmc part 0, 
-    setenv bootcmd "'fatload mmc 0:3 0x80000000 uImage; 
-    fatload mmc 0:3 0x81600000 uInitrd; 
-    bootm 0x80000000 0x81600000'",
-    setenv bootargs "' console=tty0 console=ttyO2,115200n8
-    root=LABEL=testrootfs rootwait ro earlyprintk fixrtc nocompcache
-    vram=12M omapfb.debug=y omapfb.mode=dvi:1280x720MR-16@60'",
-    boot
-
-boot_cmds_android = mmc init,
-	mmc part 0,
-	setenv bootcmd "'fatload mmc 0:3 0x80000000 uImage;
-	fatload mmc 0:3 0x81600000 uInitrd;
-	bootm 0x80000000 0x81600000'",
-	setenv bootargs "'console=tty0 console=ttyO2,115200n8 
-	rootwait rw earlyprintk fixrtc nocompcache 
-	vram=12M omapfb.debug=y omapfb.mode=dvi:1280x720MR-16@60 
-	init=/init androidboot.console=ttyO2'",
-	boot
-
-qemu_machine_type = beaglexm
-lmc_dev_arg = beagle
-bootloader_prompt = OMAP3 beagleboard.org
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone-black.conf	1970-01-01 00:00:00 +0000
@@ -1,40 +0,0 @@ 
-client_type = bootloader
-
-boot_cmds_tftp =
-    setenv autoload no,
-    setenv pxefile_addr_r "'0x50000000'",
-    setenv kernel_addr_r "'0x80200000'",
-    setenv initrd_addr_r "'0x81000000'",
-    setenv fdt_addr_r "'0x815f0000'",
-    setenv initrd_high "'0xffffffff'",
-    setenv fdt_high "'0xffffffff'",
-    setenv loadkernel "'tftp ${kernel_addr_r} ${lava_kernel}'",
-    setenv loadinitrd "'tftp ${initrd_addr_r} ${lava_ramdisk}; setenv initrd_size ${filesize}'",
-    setenv loadfdt "'tftp ${fdt_addr_r} ${lava_dtb}'",
-    setenv bootargs "'console=ttyO0,115200n8 root=/dev/ram0 earlyprintk ip=:::::eth0:dhcp'",
-    setenv bootcmd "'dhcp; setenv serverip ${lava_server_ip}; run loadkernel; run loadinitrd; run loadfdt; bootz ${kernel_addr_r} ${initrd_addr_r} ${fdt_addr_r}'",
-    boot
-
-boot_cmds_oe = 
-    setenv initrd_high "'0xffffffff'",
-    setenv fdt_high "'0xffffffff'",
-    setenv bootcmd "'fatload mmc 0:3 0x80200000 uImage; fatload mmc 0:3 0x815f0000 board.dtb; bootm 0x80200000 - 0x815f0000'",
-    setenv bootargs "'console=ttyO0,115200n8 root=/dev/mmcblk0p5 rootwait ro'",
-    boot
-
-boot_cmds = 
-    setenv initrd_high "'0xffffffff'",
-    setenv fdt_high "'0xffffffff'",
-    setenv bootcmd "'fatload mmc 0:3 0x80200000 uImage; fatload mmc 0:3 0x81600000 uInitrd; fatload mmc 0:3 0x815f0000 board.dtb; bootm 0x80200000 0x81600000 0x815f0000'",
-    setenv bootargs "'console=ttyO0,115200n8 root=LABEL=testrootfs rootwait ro'",
-    boot
-
-lmc_dev_arg = beaglebone
-
-bootloader_prompt = U-Boot
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/beaglebone.conf	1970-01-01 00:00:00 +0000
@@ -1,39 +0,0 @@ 
-client_type = bootloader
-boot_cmds_oe = 
-    setenv initrd_high "'0xffffffff'",
-    setenv fdt_high "'0xffffffff'",
-    setenv bootcmd "'fatload mmc 0:3 0x80200000 uImage; fatload mmc 0:3 0x815f0000 board.dtb; bootm 0x80200000 - 0x815f0000'",
-    setenv bootargs "'console=ttyO0,115200n8 root=/dev/mmcblk0p5 rootwait ro'",
-    boot
-
-boot_cmds = 
-    setenv initrd_high "'0xffffffff'",
-    setenv fdt_high "'0xffffffff'",
-    setenv bootcmd "'fatload mmc 0:3 0x80200000 uImage; fatload mmc 0:3 0x81600000 uInitrd; fatload mmc 0:3 0x815f0000 board.dtb; bootm 0x80200000 0x81600000 0x815f0000'",
-    setenv bootargs "'console=ttyO0,115200n8 root=LABEL=testrootfs rootwait ro'",
-    boot
-
-boot_cmds_tftp =
-    setenv autoload no,
-    setenv pxefile_addr_r "'0x50000000'",
-    setenv kernel_addr_r "'0x80200000'",
-    setenv initrd_addr_r "'0x81000000'",
-    setenv fdt_addr_r "'0x815f0000'",
-    setenv initrd_high "'0xffffffff'",
-    setenv fdt_high "'0xffffffff'",
-    setenv loadkernel "'tftp ${kernel_addr_r} ${lava_kernel}'",
-    setenv loadinitrd "'tftp ${initrd_addr_r} ${lava_ramdisk}; setenv initrd_size ${filesize}'",
-    setenv loadfdt "'tftp ${fdt_addr_r} ${lava_dtb}'",
-    setenv bootargs "'console=ttyO0,115200n8 root=/dev/ram0 earlyprintk ip=:::::eth0:dhcp'",
-    setenv bootcmd "'dhcp; setenv serverip ${lava_server_ip}; run loadkernel; run loadinitrd; run loadfdt; bootz ${kernel_addr_r} ${initrd_addr_r} ${fdt_addr_r}'",
-    boot,
-
-lmc_dev_arg = beaglebone
-
-bootloader_prompt = U-Boot
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/capri.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/capri.conf	2013-07-02 15:12:52 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/capri.conf	1970-01-01 00:00:00 +0000
@@ -1,46 +0,0 @@ 
-client_type = capri
-
-# The ADB command line.
-#
-# In the case where there are multiple android devices plugged into a
-# single host, this connection command must be overriden on each device to
-# include the serial number of the device, e.g.
-#
-#   serial_number = XXXXXXXXXXXXXXXX
-#   adb_command = adb -s %(serial_number)s
-adb_command = adb
-
-# The fastboot command.
-#
-# The same as above: if you have more than one device, you will want to
-# override this in your device config to add a serial number, e.g.
-#
-#   serial_number = XXXXXXXXXXXXXXXX
-#   fastboot_command = fastboot -s %(serial_number)s
-#
-# Of course, in the case you override both adb_command *and* fastboot_command,
-# you don't need to specify `serial_number` twice.
-fastboot_command = fastboot
-
-# Working directory for temporary files. By default, the usual place for LAVA
-# images will be used.
-#
-# This is useful when the lava dispatcher is controlling the device under test which is
-# physically plugged to other machines by setting adb_command to something like
-# "ssh <phone-host> adb" and fastboot_command to something like "ssh
-# <phone-host> fastboot". adb and fastboot always operate on local files, so
-# you need your local files to also be seen as local files on the host where
-# adb/fastboot are executed.
-#
-# In this case, you should set shared_working_directory to a shared directory
-# between the machine running the dispatcher and the machine where the phone is
-# plugged.  This shared directory must have the same path in both machines.
-# For example, you can have your /var/tmp/lava mounted at /var/tmp/lava at
-# <phone-host> (or the other way around).
-shared_working_directory =
-
-connection_command = %(adb_command)s shell
-
-enable_network_after_boot_android = false
-android_adb_over_usb = true
-android_adb_over_tcp = false

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/highbank.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/highbank.conf	2013-05-20 17:49:09 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/highbank.conf	1970-01-01 00:00:00 +0000
@@ -1,2 +0,0 @@ 
-client_type = ipmi_pxe
-connection_command = ipmitool -I lanplus -U admin -P admin -H %(ecmeip)s sol activate

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/k3v2.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/k3v2.conf	2013-06-10 20:26:18 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/k3v2.conf	1970-01-01 00:00:00 +0000
@@ -1,46 +0,0 @@ 
-client_type = k3v2
-
-# The ADB command line.
-#
-# In the case where there are multiple android devices plugged into a
-# single host, this connection command must be overriden on each device to
-# include the serial number of the device, e.g.
-#
-#   serial_number = XXXXXXXXXXXXXXXX
-#   adb_command = adb -s %(serial_number)s
-adb_command = adb
-
-# The fastboot command.
-#
-# The same as above: if you have more than one device, you will want to
-# override this in your device config to add a serial number, e.g.
-#
-#   serial_number = XXXXXXXXXXXXXXXX
-#   fastboot_command = fastboot -s %(serial_number)s
-#
-# Of course, in the case you override both adb_command *and* fastboot_command,
-# you don't need to specify `serial_number` twice.
-fastboot_command = fastboot
-
-# Working directory for temporary files. By default, the usual place for LAVA
-# images will be used.
-#
-# This is useful when the lava dispatcher is controlling the device under test which is
-# physically plugged to other machines by setting adb_command to something like
-# "ssh <phone-host> adb" and fastboot_command to something like "ssh
-# <phone-host> fastboot". adb and fastboot always operate on local files, so
-# you need your local files to also be seen as local files on the host where
-# adb/fastboot are executed.
-#
-# In this case, you should set shared_working_directory to a shared directory
-# between the machine running the dispatcher and the machine where the phone is
-# plugged.  This shared directory must have the same path in both machines.
-# For example, you can have your /var/tmp/lava mounted at /var/tmp/lava at
-# <phone-host> (or the other way around).
-shared_working_directory =
-
-connection_command = %(adb_command)s shell
-
-enable_network_after_boot_android = false
-android_adb_over_usb = true
-android_adb_over_tcp = false

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/keystone.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/keystone.conf	2013-08-29 00:09:02 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/keystone.conf	1970-01-01 00:00:00 +0000
@@ -1,25 +0,0 @@ 
-client_type = bootloader
-
-master_str = root@keystone-evm
-
-bootloader_prompt = TCI6638 EVM
-
-boot_cmds_tftp =
-    setenv autoload no,
-    setenv fdt_high "'0xffffffff'",
-    setenv loadkernel "'tftp ${addr_kern} ${lava_kernel}'",
-    setenv loadinitrd "'tftp ${addr_fs} ${lava_ramdisk}'",
-    setenv loadfdt "'tftp ${addr_fdt} ${lava_dtb}'",
-    setenv loadbootmon "'tftp ${addr_mon} ${lava_firmware}'",
-    setenv bootargs "'console=ttyS0,115200n8 rootwait=1 earlyprintk rdinit=/sbin/init rw root=/dev/ram0 initrd=0x802000000,9M ip=dhcp'",
-    setenv bootcmd "'dhcp; setenv serverip ${lava_server_ip}; run loadkernel; run loadinitrd; run loadfdt; run loadbootmon; run run_mon; run run_kern'",
-    boot
-
-boot_cmds = 
-    boot
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/kvm.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/kvm.conf	2013-08-23 03:22:03 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/kvm.conf	1970-01-01 00:00:00 +0000
@@ -1,4 +0,0 @@ 
-client_type = qemu
-qemu_binary = qemu-system-x86_64
-kvm_networking_options = -net nic,model=virtio -net user
-qemu_options = -machine accel=kvm:tcg -hda {DISK_IMAGE} -nographic %(kvm_networking_options)s

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/mx51evk.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/mx51evk.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/mx51evk.conf	1970-01-01 00:00:00 +0000
@@ -1,18 +0,0 @@ 
-client_type = bootloader
-boot_part = 2
-root_part = 3
-boot_cmds = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:5 0x90000000 uImage; fatload mmc 0:5 
-    0x92000000 uInitrd; fatload mmc 0:5 0x91ff0000 board.dtb; bootm 
-    0x90000000 0x92000000 0x91ff0000'",
-    setenv bootargs "' console=tty0 console=ttymxc0,115200n8 
-    root=LABEL=testrootfs rootwait ro'",
-    boot
-bootloader_prompt = >
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/mx53loco.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/mx53loco.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/mx53loco.conf	1970-01-01 00:00:00 +0000
@@ -1,38 +0,0 @@ 
-client_type = bootloader
-boot_part = 2
-root_part = 3
-boot_cmds = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:5 0x70800000 uImage; fatload mmc 
-    0:5 0x71800000 uInitrd; bootm 0x70800000 0x71800000'",
-    setenv bootargs "' console=tty0 console=ttymxc0,115200n8 
-    root=LABEL=testrootfs rootwait ro'",
-    boot
-
-# Original linaro-android-media-create generated Android system SD card layout
-boot_part_android_org = 2
-sys_part_android_org = 3
-cache_part_android_org = 5
-data_part_android_org = 6
-sdcard_part_android_org = 7
-
-# Android LAVA test image SD card layout
-sys_part_android = 6
-sdcard_part_android = 7
-data_part_android = 7
-boot_cmds_android = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:5 0x70000000 uImage;
-    fatload mmc 0:5 0x72000000 uInitrd; bootm 0x70000000 0x72000000'",
-    setenv bootargs "'console=tty0 console=ttymxc0,115200n8 
-    rootwait ro earlyprintk rootdelay=1 fixrtc nocompcache di1_primary tve
-    init=/init androidboot.console=ttymxc0'",
-    boot
-bootloader_prompt = >
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds
-read_boot_cmds_from_image = 0

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/nexus.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/nexus.conf	2013-03-29 18:42:26 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/nexus.conf	1970-01-01 00:00:00 +0000
@@ -1,46 +0,0 @@ 
-client_type = fastboot
-
-# The ADB command line.
-#
-# In the case where there are multiple android devices plugged into a
-# single host, this connection command must be overriden on each device to
-# include the serial number of the device, e.g.
-#
-#   serial_number = XXXXXXXXXXXXXXXX
-#   adb_command = adb -s %(serial_number)s
-adb_command = adb
-
-# The fastboot command.
-#
-# The same as above: if you have more than one device, you will want to
-# override this in your device config to add a serial number, e.g.
-#
-#   serial_number = XXXXXXXXXXXXXXXX
-#   fastboot_command = fastboot -s %(serial_number)s
-#
-# Of course, in the case you override both adb_command *and* fastboot_command,
-# you don't need to specify `serial_number` twice.
-fastboot_command = fastboot
-
-# Working directory for temporary files. By default, the usual place for LAVA
-# images will be used.
-#
-# This is useful when the lava dispatcher is controlling the device under test which is
-# physically plugged to other machines by setting adb_command to something like
-# "ssh <phone-host> adb" and fastboot_command to something like "ssh
-# <phone-host> fastboot". adb and fastboot always operate on local files, so
-# you need your local files to also be seen as local files on the host where
-# adb/fastboot are executed.
-#
-# In this case, you should set shared_working_directory to a shared directory
-# between the machine running the dispatcher and the machine where the phone is
-# plugged.  This shared directory must have the same path in both machines.
-# For example, you can have your /var/tmp/lava mounted at /var/tmp/lava at
-# <phone-host> (or the other way around).
-shared_working_directory =
-
-connection_command = %(adb_command)s shell
-
-enable_network_after_boot_android = false
-android_adb_over_usb = true
-android_adb_over_tcp = false

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/nexus10.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/nexus10.conf	2013-07-02 15:12:52 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/nexus10.conf	1970-01-01 00:00:00 +0000
@@ -1,46 +0,0 @@ 
-client_type = nexus10
-
-# The ADB command line.
-#
-# In the case where there are multiple android devices plugged into a
-# single host, this connection command must be overriden on each device to
-# include the serial number of the device, e.g.
-#
-#   serial_number = XXXXXXXXXXXXXXXX
-#   adb_command = adb -s %(serial_number)s
-adb_command = adb
-
-# The fastboot command.
-#
-# The same as above: if you have more than one device, you will want to
-# override this in your device config to add a serial number, e.g.
-#
-#   serial_number = XXXXXXXXXXXXXXXX
-#   fastboot_command = fastboot -s %(serial_number)s
-#
-# Of course, in the case you override both adb_command *and* fastboot_command,
-# you don't need to specify `serial_number` twice.
-fastboot_command = fastboot
-
-# Working directory for temporary files. By default, the usual place for LAVA
-# images will be used.
-#
-# This is useful when the lava dispatcher is controlling the device under test which is
-# physically plugged to other machines by setting adb_command to something like
-# "ssh <phone-host> adb" and fastboot_command to something like "ssh
-# <phone-host> fastboot". adb and fastboot always operate on local files, so
-# you need your local files to also be seen as local files on the host where
-# adb/fastboot are executed.
-#
-# In this case, you should set shared_working_directory to a shared directory
-# between the machine running the dispatcher and the machine where the phone is
-# plugged.  This shared directory must have the same path in both machines.
-# For example, you can have your /var/tmp/lava mounted at /var/tmp/lava at
-# <phone-host> (or the other way around).
-shared_working_directory =
-
-connection_command = %(adb_command)s shell
-
-enable_network_after_boot_android = false
-android_adb_over_usb = true
-android_adb_over_tcp = false

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/origen.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/origen.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/origen.conf	1970-01-01 00:00:00 +0000
@@ -1,37 +0,0 @@ 
-client_type = bootloader
-boot_part = 2
-root_part = 3
-testboot_offset = 3
-boot_cmds = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:5 0x40007000 uImage; fatload mmc 0:5 0x42000000 uInitrd; fatload mmc 0:5 0x41f00000 board.dtb; bootm 0x40007000 0x42000000 0x41f00000'",
-    setenv bootargs "'console=ttySAC2,115200n8  root=LABEL=testrootfs rootwait ro'",
-    boot
-
-# Original linaro-android-media-create generated Android system SD card layout
-boot_part_android_org = 2
-sys_part_android_org = 3
-cache_part_android_org = 5
-data_part_android_org = 6
-sdcard_part_android_org = 7
-
-# Android LAVA test image SD card layout
-sys_part_android = 6
-sdcard_part_android = 7
-data_part_android = 7
-
-boot_cmds_android = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:5 0x40007000 uImage;fatload mmc 0:5 0x42000000 uInitrd; bootm 0x40007000 0x42000000'",
-    setenv bootargs "'console=ttySAC2,115200n8 rootwait ro init=/init androidboot.console=ttySAC2'",
-    boot
-
-bootloader_prompt = ORIGEN
-
-lmc_dev_arg = origen
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/panda.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/panda.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/panda.conf	1970-01-01 00:00:00 +0000
@@ -1,68 +0,0 @@ 
-client_type = bootloader
-boot_cmds = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:3 0x80200000 uImage; fatload mmc 
-    0:3 0x81600000 uInitrd; bootm 0x80200000 0x81600000'",
-    setenv bootargs "' console=tty0 console=ttyO2,115200n8 
-    root=LABEL=testrootfs rootwait ro earlyprintk fixrtc nocompcache 
-    vram=48M omapfb.vram=0:24M mem=456M@0x80000000 mem=512M@0xA0000000'",
-    boot
-boot_cmds_android = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:3 0x80200000 uImage;
-    fatload mmc 0:3 0x81600000 uInitrd;
-    bootm 0x80200000 0x81600000'",
-    setenv bootargs "'console=tty0 console=ttyO2,115200n8 
-    rootwait rw earlyprintk fixrtc nocompcache vram=48M 
-    omapfb.vram=0:24M,1:24M mem=456M@0x80000000 mem=512M@0xA0000000 
-    init=/init androidboot.console=ttyO2'",
-    boot
-
-boot_cmds_oe = mmc init,
-    mmc part 0,
-    setenv initrd_high "0xffffffff",
-    setenv fdt_high "0xffffffff",
-    setenv bootcmd "'fatload mmc 0:3 0x80200000 uImage; bootm 0x80200000'",
-    setenv bootargs "' console=tty0 console=ttyO2,115200n8
-    root=/dev/mmcblk0p5 rootwait ro earlyprintk fixrtc nocompcache
-    vram=48M omapfb.vram=0:24M'",
-    boot
-
-boot_cmds_fdt = mmc init,
-    mmc part 0,
-    setenv bootcmd "'fatload mmc 0:3 0x80200000 uImage; fatload mmc 0:3 0x81600000 uInitrd; fatload mmc 0:3 0x815f0000 board.dtb; bootm 0x80200000 0x81600000 0x815f0000'",
-    setenv bootargs "'console=tty0 console=ttyO2,115200n8 root=LABEL=testrootfs rootwait ro earlyprintk fixrtc nocompcache vram=48M omapfb.vram=0:24M mem=456M@0x80000000 mem=512M@0xA0000000'",
-    boot
-
-boot_cmds_tftp = 
-    setenv autoload no,
-    setenv pxefile_addr_r "'0x50000000'",
-    setenv kernel_addr_r "'0x80200000'",
-    setenv initrd_addr_r "'0x81600000'",
-    setenv fdt_addr_r "'0x815f0000'",
-    setenv initrd_high "'0xffffffff'",
-    setenv fdt_high "'0xffffffff'",
-    setenv loadkernel "'tftp ${kernel_addr_r} ${lava_kernel}'",
-    setenv loadinitrd "'tftp ${initrd_addr_r} ${lava_ramdisk}; setenv initrd_size ${filesize}'",
-    setenv loadfdt "'tftp ${fdt_addr_r} ${lava_dtb}'",
-    setenv bootargs "'console=ttyO2,115200n8 root=/dev/ram0 fixrtc nocompcache vram=48M omapfb.vram=0:24M mem=456M@0x80000000 mem=512M@0xA0000000 ip=:::::eth0:dhcp init=init'",
-    setenv bootcmd "'usb start; dhcp; setenv serverip ${lava_server_ip}; run loadkernel; run loadinitrd; run loadfdt; bootm ${kernel_addr_r} ${initrd_addr_r} ${fdt_addr_r}'",
-    boot
-
-android_binary_drivers = http://192.168.1.21/LAVA_HTTP/android-binaries/panda-drivers.tgz
-possible_partitions_files =
-    init.partitions.rc
-    fstab.partitions
-    init.rc
-    fstab.omap4pandaboard
-
-# changed to /mnt/sdcard in  http://review.android.git.linaro.org/#change,3213
-sdcard_mountpoint_path = /mnt/sdcard
-
-bootloader_prompt = Panda
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/qemu.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/qemu.conf	2013-06-05 21:43:24 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/qemu.conf	1970-01-01 00:00:00 +0000
@@ -1,7 +0,0 @@ 
-client_type=qemu
-
-qemu_binary = qemu-system-arm
-qemu_options = -M %(qemu_machine_type)s -drive if=%(qemu_drive_interface)s,cache=writeback,file={DISK_IMAGE} -clock unix -device usb-kbd -device usb-mouse -usb -device usb-net,netdev=mynet -netdev user,id=mynet -net nic -net user -nographic
-
-qemu_machine_type = beaglexm
-qemu_drive_interface = sd

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_foundation-armv8.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_foundation-armv8.conf	2013-07-02 15:12:52 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_foundation-armv8.conf	1970-01-01 00:00:00 +0000
@@ -1,20 +0,0 @@ 
-client_type=fastmodel
-
-# how long the disablesuspend script should take to complete
-# fm takes longer than other android images do
-disablesuspend_timeout = 500
-
-# how long ubuntu takes to boot to prompt
-boot_linaro_timeout = 500
-
-# if you do dhcp on boot, adb will not work (asac) on fastmodels
-enable_network_after_boot_android = 0
-
-# we do usermode networking over the loopback
-default_network_interface = lo
-
-simulator_axf_files = img-foundation.axf
-
-simulator_version_command = /opt/arm/Foundation_v8pkg/Foundation_v8 --version | grep "ARM V8 Foundation Model" | sed 's/ARM V8 Foundation Model //'
-
-simulator_command = sudo -u www-data /opt/arm/Foundation_v8pkg/Foundation_v8 --image={AXF} --block-device={IMG} --network=nat

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-a15x1-a7x1.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-a15x1-a7x1.conf	2013-07-03 10:11:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-a15x1-a7x1.conf	1970-01-01 00:00:00 +0000
@@ -1,117 +0,0 @@ 
-client_type=fastmodel
-
-# how long the disablesuspend script should take to complete
-# fm takes longer than other android images do
-disablesuspend_timeout = 500
-
-# how long ubuntu takes to boot to prompt
-boot_linaro_timeout = 800
-
-# if you do dhcp on boot, adb will not work (asac) on fastmodels
-enable_network_after_boot_android = 0
-
-# we do usermode networking over the loopback
-default_network_interface = lo
-
-bootloader_prompt = Start:
-
-interrupt_boot_prompt = The default boot selection will start in
-
-interrupt_boot_command = break
-
-# UEFI boot commands
-boot_cmds = sendline a,
-           expect Choice:,
-           sendline 1,
-           expect Select the Boot Device:,
-           sendline 2,
-           expect File path of the EFI Application or the kernel:,
-           sendline uImage,
-           expect [a/g/l],
-           sendline l,
-           expect Add an initrd: [y/n],
-           sendline y,
-           expect File path of the initrd:,
-           sendline uInitrd,
-           expect Arguments to pass to the binary:,
-           sendline 'console=ttyAMA0,38400n8 root=/dev/mmcblk0p2 rootwait ro mem=1024M',
-           expect File path of the local FDT:,
-           sendline rtsm\\rtsm_ve-ca15x1-ca7x1.dtb,
-           expect Description for this new Entry:,
-           sendline Test Image,
-           expect Choice:,
-           sendline 5,
-           expect Start:,
-           sendline 2
-
-simulator_axf_files =
-    img.axf
-    linux-system-ISW.axf
-    linux-system-semi.axf
-
-simulator_kernel_files =
-    uImage
-    vmlinuz.*
-
-simulator_initrd_files =
-    uInitrd
-    initrd.*
-
-simulator_dtb = rtsm_ve-ca15x1-ca7x1.dtb
-simulator_uefi = uefi_rtsm_ve-ca15.bin
-
-license_file = 8224@localhost
-sim_bin = /opt/arm/RTSM_A15-A7x14_VE/Linux64_RTSM_VE_Cortex-A15x1-A7x1/RTSM_VE_Cortex-A15x1-A7x1
-android_adb_port = 6555
-
-simulator_version_command = %(sim_bin)s --version | grep "Fast Models" | sed 's/Fast Models \[//' | sed 's/\]//'
-
-simulator_boot_wrapper = -a coretile.cluster0.*={AXF}
-
-simulator_command = sudo -u www-data ARMLMD_LICENSE_FILE="%(license_file)s" %(sim_bin)s
-
-boot_options =
-        motherboard.mmc.p_mmc_file
-	motherboard.hostbridge.userNetPorts
-	motherboard.smsc_91c111.enabled
-	motherboard.hostbridge.userNetworking
-        motherboard.flashloader0.fname
-        motherboard.flashloader1.fname
-        motherboard.flashloader1.fnameWrite
-	coretile.cache_state_modelled
-	coretile.cluster0.cpu0.semihosting-enable
-	coretile.cluster0.cpu0.semihosting-cmd_line
-
-[motherboard.mmc.p_mmc_file]
-default = {IMG}
-
-[motherboard.hostbridge.userNetPorts]
-default="%(android_adb_port)s=%(android_adb_port)s"
-
-[motherboard.smsc_91c111.enabled]
-default = 1
-allowed = 0,1
-
-[motherboard.hostbridge.userNetworking]
-default = 1
-allowed = 0,1
-
-[motherboard.flashloader0.fname]
-default = {UEFI}
-
-[motherboard.flashloader1.fname]
-default = uefi-vars.fd
-
-[motherboard.flashloader1.fnameWrite]
-default = uefi-vars.fd
-
-[coretile.cache_state_modelled]
-default = 0
-allowed = 0,1
-
-[coretile.cluster0.cpu0.semihosting-enable]
-default = 1
-allowed = 0,1
-
-[coretile.cluster0.cpu0.semihosting-cmd_line]
-default = "--kernel {KERNEL} --dtb {DTB} --initrd {INITRD} -- console=ttyAMA0,38400n8  root=/dev/mmcblk0p2  rootwait ro mem=1024M"

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-a15x4-a7x4.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-a15x4-a7x4.conf	2013-07-03 10:11:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-a15x4-a7x4.conf	1970-01-01 00:00:00 +0000
@@ -1,117 +0,0 @@ 
-client_type=fastmodel
-
-# how long the disablesuspend script should take to complete
-# fm takes longer than other android images do
-disablesuspend_timeout = 500
-
-# how long ubuntu takes to boot to prompt
-boot_linaro_timeout = 800
-
-# if you do dhcp on boot, adb will not work (asac) on fastmodels
-enable_network_after_boot_android = 0
-
-# we do usermode networking over the loopback
-default_network_interface = lo
-
-bootloader_prompt = Start:
-
-interrupt_boot_prompt = The default boot selection will start in
-
-interrupt_boot_command = break
-
-# UEFI boot commands
-boot_cmds = sendline a,
-           expect Choice:,
-           sendline 1,
-           expect Select the Boot Device:,
-           sendline 2,
-           expect File path of the EFI Application or the kernel:,
-           sendline uImage,
-           expect [a/g/l],
-           sendline l,
-           expect Add an initrd: [y/n],
-           sendline y,
-           expect File path of the initrd:,
-           sendline uInitrd,
-           expect Arguments to pass to the binary:,
-           sendline 'console=ttyAMA0,38400n8 root=/dev/mmcblk0p2 rootwait ro mem=1024M',
-           expect File path of the local FDT:,
-           sendline rtsm\\rtsm_ve-ca15x4-ca7x4.dtb,
-           expect Description for this new Entry:,
-           sendline Test Image,
-           expect Choice:,
-           sendline 5,
-           expect Start:,
-           sendline 2
-
-simulator_axf_files =
-    img.axf
-    linux-system-ISW.axf
-    linux-system-semi.axf
-
-simulator_kernel_files =
-    uImage
-    vmlinuz.*
-
-simulator_initrd_files =
-    uInitrd
-    initrd.*
-
-simulator_dtb = rtsm_ve-ca15x4-ca7x4.dtb
-simulator_uefi = uefi_rtsm_ve-ca15.bin
-
-license_file = 8224@localhost
-sim_bin = /opt/arm/RTSM_A15-A7x14_VE/Linux64_RTSM_VE_Cortex-A15x4-A7x4/RTSM_VE_Cortex-A15x4-A7x4
-android_adb_port = 6555
-
-simulator_version_command = %(sim_bin)s --version | grep "Fast Models" | sed 's/Fast Models \[//' | sed 's/\]//'
-
-simulator_boot_wrapper = -a coretile.cluster0.*={AXF}
-
-simulator_command = sudo -u www-data ARMLMD_LICENSE_FILE="%(license_file)s" %(sim_bin)s
-
-boot_options =
-        motherboard.mmc.p_mmc_file
-	motherboard.hostbridge.userNetPorts
-	motherboard.smsc_91c111.enabled
-	motherboard.hostbridge.userNetworking
-        motherboard.flashloader0.fname
-        motherboard.flashloader1.fname
-        motherboard.flashloader1.fnameWrite
-	coretile.cache_state_modelled
-	coretile.cluster0.cpu0.semihosting-enable
-	coretile.cluster0.cpu0.semihosting-cmd_line
-
-[motherboard.mmc.p_mmc_file]
-default = {IMG}
-
-[motherboard.hostbridge.userNetPorts]
-default="%(android_adb_port)s=%(android_adb_port)s"
-
-[motherboard.smsc_91c111.enabled]
-default = 1
-allowed = 0,1
-
-[motherboard.hostbridge.userNetworking]
-default = 1
-allowed = 0,1
-
-[motherboard.flashloader0.fname]
-default = {UEFI}
-
-[motherboard.flashloader1.fname]
-default = uefi-vars.fd
-
-[motherboard.flashloader1.fnameWrite]
-default = uefi-vars.fd
-
-[coretile.cache_state_modelled]
-default = 0
-allowed = 0,1
-
-[coretile.cluster0.cpu0.semihosting-enable]
-default = 1
-allowed = 0,1
-
-[coretile.cluster0.cpu0.semihosting-cmd_line]
-default = "--kernel {KERNEL} --dtb {DTB} --initrd {INITRD} -- console=ttyAMA0,38400n8  root=/dev/mmcblk0p2  rootwait ro mem=1024M"

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-armv8.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-armv8.conf	2013-07-02 15:12:52 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/rtsm_ve-armv8.conf	1970-01-01 00:00:00 +0000
@@ -1,128 +0,0 @@ 
-client_type=fastmodel
-
-# how long the disablesuspend script should take to complete
-# fm takes longer than other android images do
-disablesuspend_timeout = 500
-
-# how long ubuntu takes to boot to prompt
-boot_linaro_timeout = 500
-
-#after enabled the network, we can set it to true
-enable_network_after_boot_android = 1
-
-# change to use eth0 after we enabled the network
-default_network_interface = eth0
-
-bootloader_prompt = Start:
-
-interrupt_boot_prompt = The default boot selection will start in
-
-interrupt_boot_command = break
-
-# UEFI boot commands
-boot_cmds = sendline a,
-           expect Choice:,
-           sendline 1,
-           expect Select the Boot Device:,
-           sendline 2,
-           expect File path of the EFI Application or the kernel:,
-           sendline uImage,
-           expect [a/g/l],
-           sendline l,
-           expect Add an initrd: [y/n],
-           sendline y,
-           expect File path of the initrd:,
-           sendline uInitrd,
-           expect Arguments to pass to the binary:,
-           sendline 'console=ttyAMA0,38400n8 root=/dev/mmcblk0p2 rootwait ro mem=1024M',
-           expect File path of the local FDT:,
-           sendline rtsm\\rtsm_ve-ca15x1-ca7x1.dtb,
-           expect Description for this new Entry:,
-           sendline Test Image,
-           expect Choice:,
-           sendline 5,
-           expect Start:,
-           sendline 2
-
-simulator_axf_files = linux-system.axf
-
-license_file = 8224@localhost
-sim_bin = /opt/arm/RTSMv8_VE/bin/model_shell64
-sim_model = /opt/arm/RTSMv8_VE/models/Linux64_GCC-4.1/RTSM_VE_AEMv8A.so
-android_adb_port = 5555
-interfaceName = armv8_01
-
-simulator_version_command = %(sim_bin)s --version | grep "Model Shell" | sed 's/Model Shell //'
-
-simulator_boot_wrapper = -a {AXF}
-
-simulator_command = sudo -u www-data ARMLMD_LICENSE_FILE="%(license_file)s" %(sim_bin)s %(sim_model)s
-
-boot_options =
-        motherboard.mmc.p_mmc_file
-	motherboard.smsc_91c111.enabled
-	cluster.NUM_CORES
-	cluster.cpu0.unpredictable_WPMASKANDBAS
-	cluster.cpu0.unpredictable_non-contigous_BAS
-	cluster.cpu1.unpredictable_WPMASKANDBAS
-	cluster.cpu1.unpredictable_non-contigous_BAS
-	cluster.cpu2.unpredictable_WPMASKANDBAS
-	cluster.cpu2.unpredictable_non-contigous_BAS
-	cluster.cpu3.unpredictable_WPMASKANDBAS
-	cluster.cpu3.unpredictable_non-contigous_BAS
-	cluster.take_ccfail_undef
-	motherboard.hostbridge.interfaceName
-	motherboard.smsc_91c111.mac_address
-
-[motherboard.smsc_91c111.mac_address]
-default="auto"
-
-[motherboard.hostbridge.interfaceName]
-default="%(interfaceName)s"
-
-[motherboard.mmc.p_mmc_file]
-default = {IMG}
-
-[motherboard.smsc_91c111.enabled]
-default = 1
-allowed = 0,1
-
-[cluster.NUM_CORES]
-default = 1
-allowed = 0,1
-
-[cluster.cpu0.unpredictable_WPMASKANDBAS]
-default = 0
-allowed = 0,1
-
-[cluster.cpu0.unpredictable_non-contigous_BAS]
-default = 0
-allowed = 0,1
-
-[cluster.cpu1.unpredictable_WPMASKANDBAS]
-default = 0
-allowed = 0,1
-
-[cluster.cpu1.unpredictable_non-contigous_BAS]
-default = 0
-allowed = 0,1
-
-[cluster.cpu2.unpredictable_WPMASKANDBAS]
-default = 0
-allowed = 0,1
-
-[cluster.cpu2.unpredictable_non-contigous_BAS]
-default = 0
-allowed = 0,1
-
-[cluster.cpu3.unpredictable_WPMASKANDBAS]
-default = 0
-allowed = 0,1
-
-[cluster.cpu3.unpredictable_non-contigous_BAS]
-default = 0
-allowed = 0,1
-
-[cluster.take_ccfail_undef]
-default = 0
-allowed = 0,1

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/snowball.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/snowball.conf	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/snowball.conf	1970-01-01 00:00:00 +0000
@@ -1,41 +0,0 @@ 
-client_type = bootloader
-
-boot_device = 1
-
-boot_cmds = mmc rescan 1,
-    setenv bootcmd "'fatload mmc 1:3 0x00100000 /uImage;
-    bootm 0x00100000'",
-    setenv bootargs "'console=ttyAMA2,115200n8 ip=dhcp vmalloc=300M
-    root=/dev/mmcblk1p5 rootwait mem=128M@0 mali.mali_mem=32M@128M
-    hwmem=168M@160M mem=48M@328M mem_issw=1M@383M mem=640M@384M'",
-    boot
-
-boot_cmds_android = mmc rescan 1,
-    setenv bootcmd "'fatload mmc 1:3 0x00100000 /uImage'",
-    setenv bootargs "'console=ttyAMA2,115200n8 rootwait ro earlyprintk mem=128M@0 mali.mali_mem=32M@128M hwmem=168M@160M mem=48M@328M mem_issw=1M@383M mem=640M@384M vmalloc=500M init=/init androidboot.console=ttyAMA2 omapdss.def_disp=hdmi consoleblank=0'",
-    boot
-
-boot_cmds_fdt = mmc rescan 1,
-    setenv bootcmd "'fatload mmc 1:3 0x00100000 /uImage; fatload mmc 1:3 0x01000000 /uInitrd; fatload mmc 1:3 0x00f00000 /board.dtb; bootm 0x00100000 0x01000000 0x00f00000'"
-    setenv bootargs "'console=ttyAMA2,115200n8 root=/dev/mmcblk1p5 rootwait ro fixrtc nocompcache vmalloc=300M mem=128M@0 mali.mali_mem=32M@128M hwmem=168M@160M mem=48M@328M mem_issw=1M@383M mem=640M@384M'",
-    boot
-    
-bootloader_prompt = U8500
-
-lmc_dev_arg = snowball_sd
-
-soft_boot_cmd = echo "Restarting system."
-    echo 0x02 > /sys/kernel/debug/ab8500/register-bank
-    echo 0x02 > /sys/kernel/debug/ab8500/register-address
-    echo 0x01 > /sys/kernel/debug/ab8500/register-value
-    echo 0x01 > /sys/kernel/debug/ab8500/register-address 
-    echo 0x11 > /sys/kernel/debug/ab8500/register-value
-
-android_orig_block_device = mmcblk1
-android_lava_block_device = mmcblk1
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/vexpress-tc2.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/vexpress-tc2.conf	2013-09-03 21:46:48 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/vexpress-tc2.conf	1970-01-01 00:00:00 +0000
@@ -1,40 +0,0 @@ 
-client_type = vexpress
-
-boot_cmds = 2
-
-boot_cmds_android = 3
-
-interrupt_boot_prompt = The default boot selection will start in
-
-bootloader_prompt = Start:
-
-lmc_dev_arg = vexpress
-
-possible_partitions_files =
-    init.partitions.rc
-    fstab.partitions
-    init.rc
-    fstab.arm-versatileexpress
-
-uefi_image_filename = uefi_v2p-ca15-tc2.bin 
-
-vexpress_uefi_path = SOFTWARE/TC2/uefi.bin
-
-vexpress_uefi_backup_path = SOFTWARE/TC2/backup-uefi.bin
-
-vexpress_usb_mass_storage_device = /dev/disk/by-label/VEMSD
-
-read_boot_cmds_from_image = 0
-
-android_orig_block_device = mmcblk0
-android_lava_block_device = sda
-partition_padding_string_android =
-sys_part_android = 1
-sdcard_part_android = 3
-data_part_android = 2
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed file 'lava_dispatcher/default-config/lava-dispatcher/device-types/vexpress.conf'
--- lava_dispatcher/default-config/lava-dispatcher/device-types/vexpress.conf	2013-09-03 21:46:48 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/device-types/vexpress.conf	1970-01-01 00:00:00 +0000
@@ -1,21 +0,0 @@ 
-boot_cmds = 2
-
-boot_cmds_android = 3
-
-interrupt_boot_prompt = The default boot selection will start in
-
-bootloader_prompt = Start:
-
-possible_partitions_files =
-    init.partitions.rc
-    fstab.partitions
-    init.rc
-    fstab.arm-versatileexpress
-
-read_boot_cmds_from_image = 0
-
-boot_options =
-    boot_cmds
-
-[boot_cmds]
-default = boot_cmds

=== removed directory 'lava_dispatcher/default-config/lava-dispatcher/devices'
=== removed file 'lava_dispatcher/default-config/lava-dispatcher/lava-dispatcher.conf'
--- lava_dispatcher/default-config/lava-dispatcher/lava-dispatcher.conf	2013-06-05 21:43:24 +0000
+++ lava_dispatcher/default-config/lava-dispatcher/lava-dispatcher.conf	1970-01-01 00:00:00 +0000
@@ -1,38 +0,0 @@ 
-# General lava-dispatcher settings.
-
-# Main LAVA server IP in the lab.
-#
-# This is the IP the device downloads the image parts from.
-LAVA_SERVER_IP = 192.168.1.10
-
-# This is the address and port of cache proxy service, format is like:
-# LAVA_PROXY = http://192.168.1.10:3128/
-LAVA_PROXY =
-
-# Location for rootfs/boot tarballs extracted from images
-LAVA_IMAGE_TMPDIR = /linaro/images/tmp
-
-# URL where LAVA_IMAGE_TMPDIR can be accessed remotely
-LAVA_IMAGE_URL = http://%(LAVA_SERVER_IP)s/images/tmp
-
-# Location on the device for storing test results.
-LAVA_RESULT_DIR = /lava/results
-
-# Location for caching downloaded artifacts such as hwpacks and images
-LAVA_CACHEDIR = /linaro/images/cache
-
-# Python logging level to use
-# 10 = DEBUG
-# 20 = INFO
-# 30 = WARNING
-# 40 = ERROR
-# Messages with a lower number than LOGGING_LEVEL will be suppressed
-LOGGING_LEVEL = 20
-
-# The url point to the version of lava-test to be install with pip (default)
-LAVA_TEST_URL = bzr+http://bazaar.launchpad.net/~linaro-validation/lava-test/trunk/#egg=lava-test
-
-# The name of the lava-test package to be installed with apt-get
-# Use this variable if you want to use apt-get instead of pip to install lava-test
-# LAVA_TEST_DEB = lava-test
-LAVA_TEST_DEB = 

=== removed directory 'lava_dispatcher/device'
=== removed file 'lava_dispatcher/device/__init__.py'
--- lava_dispatcher/device/__init__.py	2012-09-28 18:30:48 +0000
+++ lava_dispatcher/device/__init__.py	1970-01-01 00:00:00 +0000
@@ -1,19 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.

=== removed file 'lava_dispatcher/device/boot_options.py'
--- lava_dispatcher/device/boot_options.py	2013-01-29 04:23:19 +0000
+++ lava_dispatcher/device/boot_options.py	1970-01-01 00:00:00 +0000
@@ -1,93 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import logging
-
-
-class BootOption(object):
-    """
-    Parses items from a config ini section into an options object.
-    """
-    def __init__(self, section, items, defval):
-        self.name = section
-        self.value = None
-        self.allowed = None
-        for item in items:
-            if item[0] == 'default':
-                self.value = item[1]
-            elif item[0] == 'allowed':
-                self.allowed = [x.strip() for x in item[1].split(',')]
-            else:
-                logging.warn('section(%s) contains unknown item: %s', section, item)
-        if defval:
-            self.value = defval
-
-    def valid(self, option):
-        if self.allowed:
-            return option in self.allowed
-        # if no "allowed" value is set, then we can accept anything
-        return True
-
-
-def as_dict(target, defaults={}):
-    """
-    converts the boot_options stanza for a device into a dictionary of
-    key value pairs for the option and its value
-
-    defaults - in some cases you need to override a default value specified
-    in the device's config. For example for boot_options with master.py, the
-    default for boot_cmds is boot_cmds. However, we really need to look at
-    the deployment_data's boot_cmds for the default so that booting
-    something like android will work.
-    """
-    options = {}
-    for opt in target.config.boot_options:
-        if opt in target.config.cp.sections():
-            defval = defaults.get(opt, None)
-            options[opt] = BootOption(opt, target.config.cp.items(opt), defval)
-        else:
-            logging.warn('no boot option config section for: %s', opt)
-
-    for opt in target.boot_options:
-        keyval = opt.split('=')
-        if len(keyval) != 2:
-            logging.warn("Invalid boot option format: %s", opt)
-        elif keyval[0] not in options:
-            logging.warn("Invalid boot option: %s", keyval[0])
-        elif not options[keyval[0]].valid(keyval[1]):
-            logging.warn("Invalid boot option value: %s", opt)
-        else:
-            options[keyval[0]].value = keyval[1]
-
-    return options
-
-
-def as_string(target, join_pattern, defaults={}):
-    """
-    pulls the options into a string via the join_pattern. The join pattern
-    can be something like "%s=%s"
-    """
-    options = as_dict(target, defaults)
-
-    cmd = ''
-    for option in options.values():
-        if option.value:
-            cmd += join_pattern % (option.name, option.value)
-    return cmd

=== removed file 'lava_dispatcher/device/bootloader.py'
--- lava_dispatcher/device/bootloader.py	2013-09-06 22:37:31 +0000
+++ lava_dispatcher/device/bootloader.py	1970-01-01 00:00:00 +0000
@@ -1,214 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Tyler Baker <tyler.baker@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import logging
-import contextlib
-import time
-import os
-
-from lava_dispatcher.device.master import (
-    MasterImageTarget
-)
-from lava_dispatcher.client.base import (
-    NetworkCommandRunner,
-)
-from lava_dispatcher.utils import (
-    string_to_list,
-    mk_targz,
-    rmtree,
-)
-from lava_dispatcher.errors import (
-    CriticalError,
-    OperationFailed,
-)
-from lava_dispatcher.downloader import (
-    download_image,
-    download_with_retry,
-)
-
-class BootloaderTarget(MasterImageTarget):
-
-    def __init__(self, context, config):
-        super(BootloaderTarget, self).__init__(context, config)
-        self._booted = False
-        self._boot_cmds = None
-        self._lava_cmds = None
-        self._uboot_boot = False
-        # This is the offset into the path, used to reference bootfiles
-        self._offset = self.scratch_dir.index('images')
-
-    def deploy_linaro_kernel(self, kernel, ramdisk, dtb, rootfs, bootloader,
-                             firmware, rootfstype, bootloadertype):
-        if bootloadertype == "u_boot":
-            # We assume we will be controlling u-boot
-            if kernel is not None:
-                # We have been passed kernel image, setup TFTP boot
-                self._uboot_boot = True
-                # We are not booted yet
-                self._booted = False
-                # We specify OE deployment data, vanilla as possible
-                self.deployment_data = self.target_map['oe']
-                # Set the TFTP server IP (Dispatcher)
-                self._lava_cmds = "lava_server_ip=" + \
-                                   self.context.config.lava_server_ip + ","
-                kernel = download_image(kernel, self.context,
-                                        self.scratch_dir, decompress=False)
-                self._lava_cmds += "lava_kernel=" + \
-                                    kernel[self._offset::] + ","
-                if ramdisk is not None:
-                    # We have been passed a ramdisk
-                    ramdisk = download_image(ramdisk, self.context,
-                                             self.scratch_dir,
-                                             decompress=False)
-                    self._lava_cmds += "lava_ramdisk=" + \
-                                        ramdisk[self._offset::] + ","
-                if dtb is not None:
-                    # We have been passed a device tree blob
-                    dtb = download_image(dtb, self.context,
-                                         self.scratch_dir, decompress=False)
-                    self._lava_cmds += "lava_dtb=" + dtb[self._offset::] + ","
-                if rootfs is not None:
-                    # We have been passed a rootfs
-                    rootfs = download_image(rootfs, self.context,
-                                            self.scratch_dir, decompress=False)
-                    self._lava_cmds += "lava_rootfs=" + \
-                                        rootfs[self._offset::] + ","
-                if bootloader is not None:
-                    # We have been passed a bootloader
-                    bootloader = download_image(bootloader, self.context,
-                                                self.scratch_dir,
-                                                decompress=False)
-                    self._lava_cmds += "lava_bootloader=" + \
-                                        bootloader[self._offset::] + ","
-                if firmware is not None:
-                    # We have been passed firmware
-                    firmware = download_image(firmware, self.context,
-                                              self.scratch_dir,
-                                              decompress=False)
-                    self._lava_cmds += "lava_firmware=" + \
-                                        firmware[self._offset::] + ","
-            else:
-                # This *should* never happen
-                raise CriticalError("No kernel images to boot")
-        else:
-            # Define other "types" of bootloaders here. UEFI? Grub?
-            raise CriticalError("U-Boot is the only supported bootloader \
-                                at this time")
-
-    def deploy_linaro(self, hwpack, rfs, bootloadertype):
-        self._uboot_boot = False
-        super(BootloaderTarget, self).deploy_linaro(hwpack, rfs,
-                                                            bootloadertype)
-
-    def deploy_linaro_prebuilt(self, image, bootloadertype):
-        self._uboot_boot = False
-        super(BootloaderTarget, self).deploy_linaro_prebuilt(image,
-                                                             bootloadertype)
-
-    def _inject_boot_cmds(self):
-        if self._is_job_defined_boot_cmds(self.config.boot_cmds):
-            logging.info('Overriding boot_cmds from job file')
-            self._boot_cmds = string_to_list(self._lava_cmds.encode('ascii')) \
-                                             + self.config.boot_cmds
-        else:
-            if self.config.boot_cmds_tftp is None:
-                raise CriticalError("No TFTP boot commands defined")
-            else:
-                logging.info('Loading boot_cmds from device configuration')
-                self._boot_cmds = self._lava_cmds + self.config.boot_cmds_tftp
-                self._boot_cmds = string_to_list(
-                                   self._boot_cmds.encode('ascii'))
-
-    def _run_boot(self):
-        self._enter_bootloader(self.proc)
-        self._inject_boot_cmds()
-        self._customize_bootloader(self.proc, self._boot_cmds)
-        self.proc.expect(self.config.image_boot_msg, timeout=300)
-        self._wait_for_prompt(self.proc, ['\(initramfs\)',
-                              self.config.master_str],
-                              self.config.boot_linaro_timeout)
-
-    def _boot_linaro_image(self):
-        if self._uboot_boot and not self._booted:
-            try:
-                if self.config.hard_reset_command:
-                    self._hard_reboot()
-                    self._run_boot()
-                else:
-                    self._soft_reboot()
-                    self._run_boot()
-            except:
-                raise OperationFailed("_run_boot failed")
-            self.proc.sendline('export PS1="%s"'
-                               % self.deployment_data['TESTER_PS1'])
-            self._booted = True
-        elif self._uboot_boot and self._booted:
-            self.proc.sendline('export PS1="%s"'
-                               % self.deployment_data['TESTER_PS1'])
-        else:
-            super(BootloaderTarget, self)._boot_linaro_image()
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-        if self._uboot_boot:
-            try:
-                pat = self.deployment_data['TESTER_PS1_PATTERN']
-                incrc = self.deployment_data['TESTER_PS1_INCLUDES_RC']
-                runner = NetworkCommandRunner(self, pat, incrc)
-
-                targetdir = '/%s' % directory
-                runner.run('mkdir -p %s' % targetdir)
-                parent_dir, target_name = os.path.split(targetdir)
-                runner.run('/bin/tar -cmzf /tmp/fs.tgz -C %s %s'
-                           % (parent_dir, target_name))
-                runner.run('cd /tmp')  # need to be in same dir as fs.tgz
-
-                ip = runner.get_target_ip()
-                url_base = self._start_busybox_http_server(runner, ip)
-
-                url = url_base + '/fs.tgz'
-                logging.info("Fetching url: %s" % url)
-                tf = download_with_retry(self.context, self.scratch_dir,
-                                         url, False)
-
-                tfdir = os.path.join(self.scratch_dir, str(time.time()))
-
-                try:
-                    os.mkdir(tfdir)
-                    self.context.run_command('/bin/tar -C %s -xzf %s'
-                                             % (tfdir, tf))
-                    yield os.path.join(tfdir, target_name)
-                finally:
-                    tf = os.path.join(self.scratch_dir, 'fs.tgz')
-                    mk_targz(tf, tfdir)
-                    rmtree(tfdir)
-
-                    # get the last 2 parts of tf, ie "scratchdir/tf.tgz"
-                    tf = '/'.join(tf.split('/')[-2:])
-                    runner.run('rm -rf %s' % targetdir)
-                    self._target_extract(runner, tf, parent_dir)
-            finally:
-                self._stop_busybox_http_server(runner)
-        else:
-            with super(BootloaderTarget, self).file_system(
-                                                partition, directory) as path:
-                yield path
-
-target_class = BootloaderTarget

=== removed file 'lava_dispatcher/device/capri.py'
--- lava_dispatcher/device/capri.py	2013-09-13 01:25:58 +0000
+++ lava_dispatcher/device/capri.py	1970-01-01 00:00:00 +0000
@@ -1,155 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Tyler Baker <Tyler.Baker@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import logging
-import contextlib
-import time
-import os
-import pexpect
-
-from lava_dispatcher.device.target import (
-    Target
-)
-from lava_dispatcher.client.base import (
-    NetworkCommandRunner,
-)
-from lava_dispatcher.errors import (
-    CriticalError,
-)
-from lava_dispatcher.device.fastboot import (
-    FastbootTarget
-)
-from lava_dispatcher.device.master import (
-    MasterImageTarget
-)
-from lava_dispatcher.utils import (
-    mk_targz,
-    rmtree,
-)
-from lava_dispatcher.downloader import (
-    download_with_retry,
-)
-
-
-class CapriTarget(FastbootTarget, MasterImageTarget):
-
-    def __init__(self, context, config):
-        super(CapriTarget, self).__init__(context, config)
-
-    def _enter_fastboot(self):
-        if self.fastboot.on():
-            logging.debug("Device is on fastboot - no need to hard reset")
-            return
-        try:
-            self._soft_reboot()
-            self._enter_bootloader(self.proc)
-        except:
-            logging.exception("_enter_bootloader failed")
-            self._hard_reboot()
-            self._enter_bootloader(self.proc)
-        self.proc.sendline("fastboot")
-
-    def deploy_android(self, boot, system, userdata):
-
-        boot = self._get_image(boot)
-        system = self._get_image(system)
-        userdata = self._get_image(userdata)
-
-        self._enter_fastboot()
-        self.fastboot.flash('boot', boot)
-        self.fastboot.flash('system', system)
-        self.fastboot.flash('userdata', userdata)
-
-        self.deployment_data = Target.ubuntu_deployment_data
-        self.deployment_data['boot_image'] = boot
-
-    def power_on(self):
-        if not self.deployment_data.get('boot_image', False):
-            raise CriticalError('Deploy action must be run first')
-
-        if not self._booted:
-            self._enter_fastboot()
-            self.fastboot('reboot')
-            self._wait_for_prompt(self.proc,
-                                  self.context.device_config.master_str,
-                                  self.config.boot_linaro_timeout)
-
-            self._booted = True
-            self.proc.sendline('')
-            self.proc.sendline('')
-            self.proc.sendline('export PS1="%s"' % self.deployment_data['TESTER_PS1'])
-            self._runner = self._get_runner(self.proc)
-
-        return self.proc
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-        try:
-            pat = self.deployment_data['TESTER_PS1_PATTERN']
-            incrc = self.deployment_data['TESTER_PS1_INCLUDES_RC']
-            runner = NetworkCommandRunner(self, pat, incrc)
-
-            targetdir = '/%s' % directory
-            runner.run('mkdir -p %s' % targetdir)
-            parent_dir, target_name = os.path.split(targetdir)
-            runner.run('/bin/tar -cmzf /tmp/fs.tgz -C %s %s'
-                       % (parent_dir, target_name))
-            runner.run('cd /tmp')  # need to be in same dir as fs.tgz
-
-            ip = runner.get_target_ip()
-
-            self.proc.sendline('python -m SimpleHTTPServer 0 2>/dev/null')
-            match_id = self.proc.expect([
-                'Serving HTTP on 0.0.0.0 port (\d+) \.\.',
-                pexpect.EOF, pexpect.TIMEOUT])
-            if match_id != 0:
-                msg = "Unable to start HTTP server on Capri"
-                logging.error(msg)
-                raise CriticalError(msg)
-            port = self.proc.match.groups()[match_id]
-
-            url = "http://%s:%s/fs.tgz" % (ip, port)
-
-            logging.info("Fetching url: %s" % url)
-            tf = download_with_retry(self.context, self.scratch_dir,
-                                     url, False)
-
-            tfdir = os.path.join(self.scratch_dir, str(time.time()))
-
-            try:
-                os.mkdir(tfdir)
-                self.context.run_command('/bin/tar -C %s -xzf %s'
-                                         % (tfdir, tf))
-                yield os.path.join(tfdir, target_name)
-            finally:
-                tf = os.path.join(self.scratch_dir, 'fs.tgz')
-                mk_targz(tf, tfdir)
-                rmtree(tfdir)
-
-                self.proc.sendcontrol('c')  # kill SimpleHTTPServer
-
-                # get the last 2 parts of tf, ie "scratchdir/tf.tgz"
-                tf = '/'.join(tf.split('/')[-2:])
-                runner.run('rm -rf %s' % targetdir)
-                self._target_extract(runner, tf, parent_dir)
-        finally:
-            self.proc.sendcontrol('c')  # kill SimpleHTTPServer
-
-target_class = CapriTarget

=== removed file 'lava_dispatcher/device/fastboot.py'
--- lava_dispatcher/device/fastboot.py	2013-07-18 14:01:21 +0000
+++ lava_dispatcher/device/fastboot.py	1970-01-01 00:00:00 +0000
@@ -1,204 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Antonio Terceiro <antonio.terceiro@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import subprocess
-from time import sleep
-import logging
-import contextlib
-
-from lava_dispatcher.device.target import (
-    Target
-)
-from lava_dispatcher.downloader import (
-    download_image
-)
-from lava_dispatcher.utils import (
-    mkdtemp
-)
-from lava_dispatcher.errors import (
-    CriticalError
-)
-
-
-def _call(context, cmd, ignore_failure, timeout):
-    cmd = 'timeout ' + str(timeout) + 's ' + cmd
-    context.run_command(cmd, failok=ignore_failure)
-
-
-class FastBoot(object):
-
-    def __init__(self, device):
-        self.device = device
-        self.context = device.context
-
-    def __call__(self, args, ignore_failure=False, timeout=600):
-        command = self.device.config.fastboot_command + ' ' + args
-        command = "flock /var/lock/lava-fastboot.lck " + command
-        _call(self.context, command, ignore_failure, timeout)
-
-    def enter(self):
-        if self.on():
-            logging.debug("Device is on fastboot - no need to hard reset")
-            return
-        try:
-            # First we try a gentle reset
-            self.device._adb(self.device.config.soft_boot_cmd)
-        except subprocess.CalledProcessError:
-            # Now a more brute force attempt. In this case the device is
-            # probably hung.
-            if self.device.config.hard_reset_command:
-                logging.debug("Will hard reset the device")
-                self.context.run_command(self.device.config.hard_reset_command)
-            else:
-                logging.critical(
-                    "Hard reset command not configured. "
-                    "Please reset the device manually."
-                )
-
-    def on(self):
-        try:
-            self('getvar all', timeout=2)
-            return True
-        except subprocess.CalledProcessError:
-            return False
-
-    def erase(self, partition):
-        self('erase %s' % partition)
-
-    def flash(self, partition, image):
-        self('flash %s %s' % (partition, image))
-
-    def boot(self, image):
-        # We need an extra bootloader reboot before actually booting the image
-        # to avoid the phone entering charging mode and getting stuck.
-        self('reboot')
-        # specifically after `fastboot reset`, we have to wait a little
-        sleep(10)
-        self('boot %s' % image)
-
-
-class FastbootTarget(Target):
-
-    def __init__(self, context, config):
-        super(FastbootTarget, self).__init__(context, config)
-
-        if not config.hard_reset_command:
-            logging.warn(
-                "Setting the hard_reset_command config option "
-                "is highly recommended!"
-            )
-
-        self._booted = False
-        self._working_dir = None
-        self.fastboot = FastBoot(self)
-
-    def deploy_android(self, boot, system, userdata):
-
-        boot = self._get_image(boot)
-        system = self._get_image(system)
-        userdata = self._get_image(userdata)
-
-        self.fastboot.enter()
-        self.fastboot.erase('boot')
-        self.fastboot.flash('system', system)
-        self.fastboot.flash('userdata', userdata)
-
-        self.deployment_data = Target.android_deployment_data
-        self.deployment_data['boot_image'] = boot
-
-    def power_on(self):
-        if not self.deployment_data.get('boot_image', False):
-            raise CriticalError('Deploy action must be run first')
-
-        self.fastboot.enter()
-        self.fastboot.boot(self.deployment_data['boot_image'])
-        self._adb('wait-for-device')
-
-        self._booted = True
-        proc = self._adb('shell', spawn=True)
-        proc.sendline("")  # required to put the adb shell in a reasonable state
-        proc.sendline("export PS1='%s'" % self.deployment_data['TESTER_PS1'])
-        self._runner = self._get_runner(proc)
-
-        return proc
-
-    def power_off(self, proc):
-        # We always leave the device on
-        pass
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-
-        if not self._booted:
-            self.power_on()
-
-        mount_point = self._get_partition_mount_point(partition)
-
-        host_dir = '%s/mnt/%s' % (self.working_dir, directory)
-        target_dir = '%s/%s' % (mount_point, directory)
-
-        subprocess.check_call(['mkdir', '-p', host_dir])
-        self._adb('pull %s %s' % (target_dir, host_dir), ignore_failure=True)
-
-        yield host_dir
-
-        self._adb('push %s %s' % (host_dir, target_dir))
-
-    def get_device_version(self):
-        # this is tricky, because fastboot does not have a visible version
-        # number. For now let's use just the adb version number.
-        return subprocess.check_output(
-            "%s version | sed 's/.* version //'" % self.config.adb_command,
-            shell=True
-        ).strip()
-
-    # start of private methods
-
-    def _get_partition_mount_point(self, partition):
-        lookup = {
-            self.config.data_part_android_org: '/data',
-            self.config.sys_part_android_org: '/system',
-        }
-        return lookup[partition]
-
-    def _adb(self, args, ignore_failure=False, spawn=False, timeout=600):
-        cmd = self.config.adb_command + ' ' + args
-        if spawn:
-            return self.context.spawn(cmd, timeout=60)
-        else:
-            _call(self.context, cmd, ignore_failure, timeout)
-
-    def _get_image(self, url):
-        sdir = self.working_dir
-        image = download_image(url, self.context, sdir, decompress=False)
-        return image
-
-    @property
-    def working_dir(self):
-        if self.config.shared_working_directory is None or \
-                self.config.shared_working_directory.strip() == '':
-            return self.scratch_dir
-
-        if self._working_dir is None:
-            self._working_dir = mkdtemp(self.config.shared_working_directory)
-        return self._working_dir
-
-
-target_class = FastbootTarget

=== removed file 'lava_dispatcher/device/fastmodel.py'
--- lava_dispatcher/device/fastmodel.py	2013-08-30 22:15:05 +0000
+++ lava_dispatcher/device/fastmodel.py	1970-01-01 00:00:00 +0000
@@ -1,330 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import codecs
-import contextlib
-import cStringIO
-import logging
-import os
-import stat
-import subprocess
-
-import lava_dispatcher.device.boot_options as boot_options
-
-from lava_dispatcher.device.target import (
-    Target
-)
-from lava_dispatcher.client.lmc_utils import (
-    image_partition_mounted,
-    generate_android_image,
-    generate_fastmodel_image,
-)
-from lava_dispatcher.downloader import (
-    download_image,
-)
-from lava_dispatcher.test_data import (
-    create_attachment,
-)
-from lava_dispatcher.utils import (
-    ensure_directory,
-    extract_targz,
-    DrainConsoleOutput,
-    finalize_process,
-    string_to_list,
-)
-
-
-class FastModelTarget(Target):
-
-    PORT_PATTERN = 'terminal_0: Listening for serial connection on port (\d+)'
-    ANDROID_WALLPAPER = 'system/wallpaper_info.xml'
-    SYS_PARTITION = 2
-    DATA_PARTITION = 5
-
-    def __init__(self, context, config):
-        super(FastModelTarget, self).__init__(context, config)
-
-        self._sim_proc = None
-
-        self._axf = None
-        self._kernel = None
-        self._dtb = None
-        self._initrd = None
-        self._uefi = None
-        self._bootloadertype = 'u_boot'
-
-    def _customize_android(self):
-        with image_partition_mounted(self._sd_image, self.DATA_PARTITION) as d:
-            wallpaper = '%s/%s' % (d, self.ANDROID_WALLPAPER)
-            # delete the android active wallpaper as slows things down
-            self.context.run_command('sudo rm -f %s' % wallpaper)
-
-        with image_partition_mounted(self._sd_image, self.SYS_PARTITION) as d:
-            with open('%s/etc/mkshrc' % d, 'a') as f:
-                f.write('\n# LAVA CUSTOMIZATIONS\n')
-                #make sure PS1 is what we expect it to be
-                f.write('PS1="%s"\n' % self.ANDROID_TESTER_PS1)
-                if not self.config.enable_network_after_boot_android:
-                    # fast model usermode networking does not support ping
-                    f.write('alias ping="echo LAVA-ping override 1 received"\n')
-
-        self.deployment_data = Target.android_deployment_data
-
-    def _copy_needed_files_from_partition(self, partno, subdir):
-        with image_partition_mounted(self._sd_image, partno) as mntdir:
-            subdir = os.path.join(mntdir, subdir)
-            self._copy_needed_files_from_directory(subdir)
-
-    def _copy_first_find_from_list(self, subdir, odir, file_list):
-        f_path = None
-        for fname in file_list:
-            f_path = self._find_and_copy(subdir, odir, fname)
-            if f_path:
-                break
-
-        return f_path
-
-    def _copy_needed_files_from_directory(self, subdir):
-        odir = os.path.dirname(self._sd_image)
-        if self._bootloadertype == 'u_boot':
-            # Extract the bootwrapper from the image
-            if self.config.simulator_axf_files and self._axf is None:
-                self._axf = self._copy_first_find_from_list(subdir, odir,
-                                                            self.config.simulator_axf_files)
-            # Extract the kernel from the image
-            if self.config.simulator_kernel_files and self._kernel is None:
-                self._kernel = self._copy_first_find_from_list(subdir, odir,
-                                                               self.config.simulator_kernel_files)
-            # Extract the initrd from the image
-            if self.config.simulator_initrd_files and self._initrd is None:
-                self._initrd = self._copy_first_find_from_list(subdir, odir,
-                                                               self.config.simulator_initrd_files)
-            # Extract the dtb from the image
-            if self.config.simulator_dtb and self._dtb is None:
-                self._dtb = self._find_and_copy(
-                    subdir, odir, self.config.simulator_dtb)
-        elif self._bootloadertype == 'uefi':
-            # Extract the uefi binary from the image
-            if self.config.simulator_uefi and self._uefi is None:
-                self._uefi = self._find_and_copy(
-                    subdir, odir, self.config.simulator_uefi)
-
-    def _check_needed_files(self):
-        if self._bootloadertype == 'u_boot':
-            # AXF is needed when we are not using UEFI
-            if self._axf is None and self.config.simulator_axf_files:
-                raise RuntimeError('No AXF found, %r' %
-                                   self.config.simulator_axf_files)
-            # Kernel is needed only for b.L models
-            if self._kernel is None and self.config.simulator_kernel_files:
-                raise RuntimeError('No KERNEL found, %r' %
-                                   self.config.simulator_kernel_files)
-            # Initrd is needed only for b.L models
-            if self._initrd is None and self.config.simulator_initrd_files:
-                raise RuntimeError('No INITRD found, %r' %
-                                   self.config.simulator_initrd_files)
-            # DTB is needed only for b.L models
-            if self._dtb is None and self.config.simulator_dtb:
-                raise RuntimeError('No DTB found, %r' %
-                                   self.config.simulator_dtb)
-        elif self._bootloadertype == 'uefi':
-            # UEFI binary is needed when specified
-            if self._uefi is None and self.config.simulator_uefi:
-                raise RuntimeError('No UEFI binary found, %r' %
-                                   self.config.simulator_uefi)
-
-    def deploy_android(self, boot, system, data):
-        logging.info("Deploying Android on %s" % self.config.hostname)
-
-        self._boot = download_image(boot, self.context, decompress=False)
-        self._data = download_image(data, self.context, decompress=False)
-        self._system = download_image(system, self.context, decompress=False)
-
-        self._sd_image = '%s/android.img' % os.path.dirname(self._system)
-
-        generate_android_image(
-            self.context, 'vexpress-a9', self._boot, self._data, self._system, self._sd_image
-        )
-
-        self._copy_needed_files_from_partition(self.config.boot_part, '')
-
-        self._customize_android()
-
-    def deploy_linaro(self, hwpack=None, rootfs=None, bootloadertype='u_boot'):
-        hwpack = download_image(hwpack, self.context, decompress=False)
-        rootfs = download_image(rootfs, self.context, decompress=False)
-        odir = os.path.dirname(rootfs)
-
-        self._bootloadertype = bootloadertype
-
-        generate_fastmodel_image(self.context, hwpack, rootfs, odir, bootloadertype)
-        self._sd_image = '%s/sd.img' % odir
-
-        self._copy_needed_files_from_directory(odir)
-        self._copy_needed_files_from_partition(self.config.boot_part, 'rtsm')
-        self._copy_needed_files_from_partition(self.config.root_part, 'boot')
-
-        self._customize_linux(self._sd_image)
-
-    def deploy_linaro_prebuilt(self, image, bootloadertype):
-        self._sd_image = download_image(image, self.context)
-        self._bootloadertype = bootloadertype
-
-        self._copy_needed_files_from_partition(self.config.boot_part, 'rtsm')
-        self._copy_needed_files_from_partition(self.config.root_part, 'boot')
-
-        self._customize_linux(self._sd_image)
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-        with image_partition_mounted(self._sd_image, partition) as mntdir:
-            path = '%s/%s' % (mntdir, directory)
-            ensure_directory(path)
-            yield path
-
-    def extract_tarball(self, tarball_url, partition, directory='/'):
-        logging.info('extracting %s to target' % tarball_url)
-
-        with image_partition_mounted(self._sd_image, partition) as mntdir:
-            tb = download_image(tarball_url, self.context, decompress=False)
-            extract_targz(tb, '%s/%s' % (mntdir, directory))
-
-    def _fix_perms(self):
-        """ The directory created for the image download/creation gets created
-        with tempfile.mkdtemp which grants permission only to the creator of
-        the directory. We need group access because the dispatcher may run
-        the simulator as a different user
-        """
-        d = os.path.dirname(self._sd_image)
-        os.chmod(d, stat.S_IRWXG | stat.S_IRWXU)
-        os.chmod(self._sd_image, stat.S_IRWXG | stat.S_IRWXU)
-        if self._axf:
-            os.chmod(self._axf, stat.S_IRWXG | stat.S_IRWXU)
-        if self._kernel:
-            os.chmod(self._kernel, stat.S_IRWXG | stat.S_IRWXU)
-        if self._initrd:
-            os.chmod(self._initrd, stat.S_IRWXG | stat.S_IRWXU)
-        if self._dtb:
-            os.chmod(self._dtb, stat.S_IRWXG | stat.S_IRWXU)
-        if self._uefi:
-            os.chmod(self._uefi, stat.S_IRWXG | stat.S_IRWXU)
-
-        #lmc ignores the parent directories group owner
-        st = os.stat(d)
-        os.chown(self._sd_image, st.st_uid, st.st_gid)
-        if self._axf:
-            os.chown(self._axf, st.st_uid, st.st_gid)
-        if self._kernel:
-            os.chown(self._kernel, st.st_uid, st.st_gid)
-        if self._initrd:
-            os.chown(self._initrd, st.st_uid, st.st_gid)
-        if self._dtb:
-            os.chown(self._dtb, st.st_uid, st.st_gid)
-        if self._uefi:
-            os.chown(self._uefi, st.st_uid, st.st_gid)
-
-    def power_off(self, proc):
-        super(FastModelTarget, self).power_off(proc)
-        finalize_process(self._sim_proc)
-        self._sim_proc = None
-
-    def _create_rtsm_ostream(self, ofile):
-        """the RTSM binary uses the windows code page(cp1252), but the
-        dashboard and celery needs data with a utf-8 encoding"""
-        return codecs.EncodedFile(ofile, 'cp1252', 'utf-8')
-
-    def _drain_sim_proc(self):
-        """pexpect will continue to get data for the simproc process. We need
-        to keep this pipe drained so that it won't get full and then stop block
-        the process from continuing to execute"""
-
-        f = cStringIO.StringIO()
-        self._sim_proc.logfile = self._create_rtsm_ostream(f)
-        DrainConsoleOutput(proc=self._sim_proc).start()
-
-    def power_on(self):
-        if self._sim_proc is not None:
-            logging.warning("device was still on, shutting down")
-            self.power_off(None)
-
-        self._check_needed_files()
-
-        self._fix_perms()
-
-        options = boot_options.as_string(self, join_pattern=' -C %s=%s')
-
-        if self.config.simulator_boot_wrapper and self._uefi is None:
-            options = '%s %s' % (self.config.simulator_boot_wrapper, options)
-
-        sim_cmd = '%s %s' % (self.config.simulator_command, options)
-        sim_cmd = sim_cmd.format(
-            AXF=self._axf, IMG=self._sd_image, KERNEL=self._kernel,
-            DTB=self._dtb, INITRD=self._initrd, UEFI=self._uefi)
-
-        # the simulator proc only has stdout/stderr about the simulator
-        # we hook up into a telnet port which emulates a serial console
-        logging.info('launching fastmodel with command %r' % sim_cmd)
-        self._sim_proc = self.context.spawn(sim_cmd, timeout=1200)
-        self._sim_proc.expect(self.PORT_PATTERN, timeout=300)
-        self._serial_port = self._sim_proc.match.groups()[0]
-        logging.info('serial console port on: %s' % self._serial_port)
-
-        match = self._sim_proc.expect(["ERROR: License check failed!",
-                                       "Simulation is started"])
-        if match == 0:
-            raise RuntimeError("fast model license check failed")
-
-        self._drain_sim_proc()
-
-        logging.info('simulator is started connecting to serial port')
-        self.proc = self.context.spawn(
-            'telnet localhost %s' % self._serial_port,
-            timeout=1200)
-        self.proc.logfile_read = self._create_rtsm_ostream(
-            self.proc.logfile_read)
-
-        if self._uefi:
-            self._enter_bootloader(self.proc)
-            if self._is_job_defined_boot_cmds(self.config.boot_cmds):
-                boot_cmds = self.config.boot_cmds
-            else:
-                boot_cmds = string_to_list(self.config.boot_cmds.encode('ascii'))
-            self._customize_bootloader(self.proc, boot_cmds)
-
-        return self.proc
-
-    def get_test_data_attachments(self):
-        """returns attachments to go in the "lava_results" test run"""
-        # if the simulator never got started we won't even get to a logfile
-        if getattr(self._sim_proc, 'logfile', None) is not None:
-            if getattr(self._sim_proc.logfile, 'getvalue', None) is not None:
-                content = self._sim_proc.logfile.getvalue()
-                return [create_attachment('rtsm.log', content)]
-        return []
-
-    def get_device_version(self):
-        cmd = self.config.simulator_version_command
-        try:
-            return subprocess.check_output(cmd, shell=True).strip()
-        except subprocess.CalledProcessError:
-            return "unknown"
-
-
-target_class = FastModelTarget

=== removed file 'lava_dispatcher/device/ipmi_pxe.py'
--- lava_dispatcher/device/ipmi_pxe.py	2013-09-10 17:18:18 +0000
+++ lava_dispatcher/device/ipmi_pxe.py	1970-01-01 00:00:00 +0000
@@ -1,237 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
-# Author: Nicholas Schutt <nick.schutt@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import contextlib
-import logging
-import os
-import time
-
-from lava_dispatcher.device.master import (
-    MasterCommandRunner,
-)
-from lava_dispatcher.device.target import (
-    Target
-)
-from lava_dispatcher.errors import (
-    CriticalError,
-)
-from lava_dispatcher.downloader import (
-    download_image,
-    download_with_retry,
-)
-from lava_dispatcher.utils import (
-    mk_targz,
-    rmtree,
-)
-from lava_dispatcher.client.lmc_utils import (
-    generate_image,
-)
-from lava_dispatcher.ipmi import IpmiPxeBoot
-
-
-class IpmiPxeTarget(Target):
-
-    MASTER_PS1 = 'root@master [rc=$(echo \$?)]# '
-    MASTER_PS1_PATTERN = 'root@master \[rc=(\d+)\]# '
-
-    def __init__(self, context, config):
-        super(IpmiPxeTarget, self).__init__(context, config)
-        self.proc = self.context.spawn(self.config.connection_command, timeout=1200)
-        self.device_version = None
-        if self.config.ecmeip is None:
-            msg = "The ecmeip address is not set for this target"
-            logging.error(msg)
-            raise CriticalError(msg)
-        self.bootcontrol = IpmiPxeBoot(context, self.config.ecmeip)
-
-    def get_device_version(self):
-        return self.device_version
-
-    def power_on(self):
-        self.bootcontrol.power_on_boot_image()
-        return self.proc
-
-    def power_off(self, proc):
-        pass
-
-    def deploy_linaro(self, hwpack, rfs, bootloadertype):
-        image_file = generate_image(self, hwpack, rfs, self.scratch_dir, bootloadertype,
-                                    extra_boot_args='1', image_size='1G')
-        self._customize_linux(image_file)
-        self._deploy_image(image_file, '/dev/sda')
-
-    def deploy_linaro_prebuilt(self, image, bootloadertype):
-        image_file = download_image(image, self.context, self.scratch_dir)
-        self._customize_linux(image_file)
-        self._deploy_image(image_file, '/dev/sda')
-
-    def _deploy_image(self, image_file, device):
-        with self._as_master() as runner:
-
-            # erase the first part of the disk to make sure the new deploy works
-            runner.run("dd if=/dev/zero of=%s bs=4M count=4" % device, timeout=1800)
-
-            # compress the image to reduce the transfer size
-            if not image_file.endswith('.bz2') and not image_file.endswith('gz'):
-                os.system('bzip2 -9v ' + image_file)
-                image_file += '.bz2'
-
-            tmpdir = self.context.config.lava_image_tmpdir
-            url = self.context.config.lava_image_url
-            image_file = image_file.replace(tmpdir, '')
-            image_url = '/'.join(u.strip('/') for u in [url, image_file])
-
-            build_dir = '/builddir'
-            image_file_base = build_dir + '/' + '/'.join(image_file.split('/')[-1:])
-
-            decompression_cmd = None
-            if image_file_base.endswith('.gz'):
-                decompression_cmd = '/bin/gzip -dc'
-            elif image_file_base.endswith('.bz2'):
-                decompression_cmd = '/bin/bzip2 -dc'
-
-            runner.run('mkdir %s' % build_dir)
-            runner.run('mount -t tmpfs -o size=100%% tmpfs %s' % build_dir)
-            runner.run('wget -O %s %s' % (image_file_base, image_url), timeout=1800)
-
-            if decompression_cmd is not None:
-                cmd = '%s %s | dd bs=4M of=%s' % (decompression_cmd, image_file_base, device)
-            else:
-                cmd = 'dd bs=4M if=%s of=%s' % (image_file_base, device)
-
-            runner.run(cmd, timeout=1800)
-            runner.run('umount %s' % build_dir)
-
-            self.resize_rootfs_partition(runner)
-
-    def get_partition(self, runner, partition):
-        if partition == self.config.boot_part:
-            partition = '/dev/disk/by-label/boot'
-        elif partition == self.config.root_part:
-            partition = '/dev/disk/by-label/rootfs'
-        else:
-            raise RuntimeError(
-                'unknown master image partition(%d)' % partition)
-        return partition
-
-    def resize_rootfs_partition(self, runner):
-        partno = self.config.root_part
-        start = None
-
-        runner.run('parted -s /dev/sda print',
-                   response='\s+%s\s+([0-9.]+.B)\s+\S+\s+\S+\s+primary\s+(\S+)' % partno,
-                   wait_prompt=False)
-        if runner.match_id != 0:
-            msg = "Unable to determine rootfs partition"
-            logging.warning(msg)
-        else:
-            start = runner.match.group(1)
-            parttype = runner.match.group(2)
-
-            if parttype == 'ext2' or parttype == 'ext3' or parttype == 'ext4':
-                runner.run('parted -s /dev/sda rm %s' % partno)
-                runner.run('parted -s /dev/sda mkpart primary %s 100%%' % start)
-                runner.run('resize2fs -f /dev/sda%s' % partno)
-            elif parttype == 'brtfs':
-                logging.warning("resize of btrfs partition not supported")
-            else:
-                logging.warning("unknown partition type for resize: %s" % parttype)
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-        logging.info('attempting to access master filesystem %r:%s' %
-                     (partition, directory))
-
-        assert directory != '/', "cannot mount entire partition"
-
-        with self._as_master() as runner:
-            runner.run('mkdir -p /mnt')
-            partition = self.get_partition(runner, partition)
-            runner.run('mount %s /mnt' % partition)
-            try:
-                targetdir = '/mnt/%s' % directory
-                runner.run('mkdir -p %s' % targetdir)
-
-                parent_dir, target_name = os.path.split(targetdir)
-
-                runner.run('/bin/tar -cmzf /tmp/fs.tgz -C %s %s' % (parent_dir, target_name))
-                runner.run('cd /tmp')  # need to be in same dir as fs.tgz
-
-                ip = runner.get_target_ip()
-                url_base = self._start_busybox_http_server(runner, ip)
-
-                url = url_base + '/fs.tgz'
-                logging.info("Fetching url: %s" % url)
-                tf = download_with_retry(self.context, self.scratch_dir, url, False)
-
-                tfdir = os.path.join(self.scratch_dir, str(time.time()))
-
-                try:
-                    os.mkdir(tfdir)
-                    self.context.run_command('/bin/tar -C %s -xzf %s' % (tfdir, tf))
-                    yield os.path.join(tfdir, target_name)
-
-                finally:
-                    tf = os.path.join(self.scratch_dir, 'fs.tgz')
-                    mk_targz(tf, tfdir)
-                    rmtree(tfdir)
-
-                    # get the last 2 parts of tf, ie "scratchdir/tf.tgz"
-                    tf = '/'.join(tf.split('/')[-2:])
-                    runner.run('rm -rf %s' % targetdir)
-                    self._target_extract(runner, tf, parent_dir)
-
-            finally:
-                    self._stop_busybox_http_server(runner)
-                    runner.run('umount /mnt')
-
-    @contextlib.contextmanager
-    def _as_master(self):
-        self.bootcontrol.power_on_boot_master()
-        self.proc.expect("\(initramfs\)")
-        self.proc.sendline('export PS1="%s"' % self.MASTER_PS1)
-        self.proc.expect(self.MASTER_PS1_PATTERN, timeout=180, lava_no_logging=1)
-        runner = MasterCommandRunner(self)
-
-        runner.run(". /scripts/functions")
-        runner.run("DEVICE=%s configure_networking" %
-                   self.config.default_network_interface)
-
-        # we call dhclient even though configure_networking above already
-        # picked up a IP address. configure_networking brings the interface up,
-        # but does not configure DNS properly. dhclient needs the interface to
-        # be up, and will set DNS correctly. In the end we are querying DHCP
-        # twice, but with a properly configured DHCP server (i.e. one that will
-        # give the same address for a given MAC address), this should not be a
-        # problem.
-        runner.run("mkdir -p /var/run")
-        runner.run("mkdir -p /var/lib/dhcp")
-        runner.run("dhclient -v -1")
-
-        self.device_version = runner.get_device_version()
-
-        try:
-            yield runner
-        finally:
-            logging.debug("deploy done")
-
-
-target_class = IpmiPxeTarget

=== removed file 'lava_dispatcher/device/k3v2.py'
--- lava_dispatcher/device/k3v2.py	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/device/k3v2.py	1970-01-01 00:00:00 +0000
@@ -1,79 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Tyler Baker <Tyler.Baker@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from time import sleep
-from lava_dispatcher.device.target import (
-    Target
-)
-from lava_dispatcher.device.fastboot import (
-    FastbootTarget
-)
-from lava_dispatcher.utils import (
-    connect_to_serial,
-)
-from lava_dispatcher.errors import (
-    CriticalError,
-)
-
-
-class K3V2Target(FastbootTarget):
-
-    def __init__(self, context, config):
-        super(K3V2Target, self).__init__(context, config)
-        self.proc = None
-
-    def deploy_android(self, boot, system, userdata):
-
-        boot = self._get_image(boot)
-        system = self._get_image(system)
-        userdata = self._get_image(userdata)
-
-        self.fastboot.enter()
-        # Need to sleep and wait for the first stage bootloaders to initialize.
-        sleep(10)
-        self.fastboot.flash('boot', boot)
-        self.fastboot.flash('system', system)
-        self.fastboot.flash('userdata', userdata)
-
-        self.deployment_data = Target.android_deployment_data
-        self.deployment_data['boot_image'] = boot
-
-    def power_on(self):
-        if not self.deployment_data.get('boot_image', False):
-            raise CriticalError('Deploy action must be run first')
-
-        # The k3v2 does not implement booting kernel from ram.
-        # So instead we must flash the boot image, and reboot.
-        self.fastboot.enter()
-        self.fastboot('reboot')
-        if self.proc is None:
-            self.proc = connect_to_serial(self.context)
-        self.proc.expect(self.context.device_config.master_str, timeout=300)
-
-        self._adb('wait-for-device')
-
-        self._booted = True
-        self.proc.sendline("")  # required to put the adb shell in a reasonable state
-        self.proc.sendline("export PS1='%s'" % self.deployment_data['TESTER_PS1'])
-        self._runner = self._get_runner(self.proc)
-
-        return self.proc
-
-target_class = K3V2Target

=== removed file 'lava_dispatcher/device/master.py'
--- lava_dispatcher/device/master.py	2013-09-04 19:19:19 +0000
+++ lava_dispatcher/device/master.py	1970-01-01 00:00:00 +0000
@@ -1,769 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import contextlib
-import logging
-import os
-import time
-import re
-
-import pexpect
-
-from lava_dispatcher.device import boot_options
-from lava_dispatcher import tarballcache
-
-from lava_dispatcher.client.base import (
-    NetworkCommandRunner,
-)
-from lava_dispatcher.device.target import (
-    Target
-)
-from lava_dispatcher.downloader import (
-    download_image,
-    download_with_retry,
-)
-from lava_dispatcher.errors import (
-    NetworkError,
-    CriticalError,
-    OperationFailed,
-)
-from lava_dispatcher.utils import (
-    connect_to_serial,
-    mk_targz,
-    string_to_list,
-    rmtree,
-    mkdtemp,
-    extract_targz,
-)
-from lava_dispatcher.client.lmc_utils import (
-    generate_image,
-    image_partition_mounted,
-)
-
-
-class MasterImageTarget(Target):
-
-    MASTER_PS1 = ' [rc=$(echo \$?)]# '
-    MASTER_PS1_PATTERN = ' \[rc=(\d+)\]# '
-
-    def __init__(self, context, config):
-        super(MasterImageTarget, self).__init__(context, config)
-
-        # Update variable according to config file
-        self.MASTER_PS1 = self.config.master_str + self.MASTER_PS1
-        self.MASTER_PS1_PATTERN = self.config.master_str + self.MASTER_PS1_PATTERN
-
-        Target.android_deployment_data['boot_cmds'] = 'boot_cmds_android'
-        Target.ubuntu_deployment_data['boot_cmds'] = 'boot_cmds'
-        Target.oe_deployment_data['boot_cmds'] = 'boot_cmds_oe'
-        Target.fedora_deployment_data['boot_cmds'] = 'boot_cmds'
-
-        # used for tarballcache logic to get proper boot_cmds
-        Target.ubuntu_deployment_data['data_type'] = 'ubuntu'
-        Target.oe_deployment_data['data_type'] = 'oe'
-        Target.fedora_deployment_data['data_type'] = 'fedora'
-        self.target_map = {
-            'android': Target.android_deployment_data,
-            'oe': Target.oe_deployment_data,
-            'ubuntu': Target.ubuntu_deployment_data,
-            'fedora': Target.fedora_deployment_data,
-        }
-
-        self.master_ip = None
-        self.device_version = None
-
-        if config.pre_connect_command:
-            self.context.run_command(config.pre_connect_command)
-
-        self.proc = connect_to_serial(self.context)
-
-    def get_device_version(self):
-        return self.device_version
-
-    def power_on(self):
-        self._boot_linaro_image()
-        return self.proc
-
-    def power_off(self, proc):
-        if self.config.power_off_cmd:
-            self.context.run_command(self.config.power_off_cmd)
-
-    def deploy_linaro(self, hwpack, rfs, bootloadertype):
-        self.boot_master_image()
-
-        image_file = generate_image(self, hwpack, rfs, self.scratch_dir, bootloadertype)
-        (boot_tgz, root_tgz, data) = self._generate_tarballs(image_file)
-
-        self._read_boot_cmds(boot_tgz=boot_tgz)
-        self._deploy_tarballs(boot_tgz, root_tgz)
-
-    def deploy_android(self, boot, system, userdata):
-        self.boot_master_image()
-
-        sdir = self.scratch_dir
-        boot = download_image(boot, self.context, sdir, decompress=False)
-        system = download_image(system, self.context, sdir, decompress=False)
-        data = download_image(userdata, self.context, sdir, decompress=False)
-
-        with self._as_master() as master:
-            self._format_testpartition(master, 'ext4')
-            self._deploy_android_tarballs(master, boot, system, data)
-
-            if master.has_partition_with_label('userdata') and \
-                    master.has_partition_with_label('sdcard'):
-                _purge_linaro_android_sdcard(master)
-
-        self.deployment_data = Target.android_deployment_data
-
-    def _deploy_android_tarballs(self, master, boot, system, data):
-        tmpdir = self.context.config.lava_image_tmpdir
-        url = self.context.config.lava_image_url
-
-        boot = boot.replace(tmpdir, '')
-        system = system.replace(tmpdir, '')
-        data = data.replace(tmpdir, '')
-
-        boot_url = '/'.join(u.strip('/') for u in [url, boot])
-        system_url = '/'.join(u.strip('/') for u in [url, system])
-        data_url = '/'.join(u.strip('/') for u in [url, data])
-
-        _deploy_linaro_android_boot(master, boot_url, self)
-        _deploy_linaro_android_system(master, system_url)
-        _deploy_linaro_android_data(master, data_url)
-
-    def deploy_linaro_prebuilt(self, image, bootloadertype):
-        self.boot_master_image()
-
-        if self.context.job_data.get('health_check', False):
-            (boot_tgz, root_tgz, data) = tarballcache.get_tarballs(
-                self.context, image, self.scratch_dir, self._generate_tarballs)
-            self.deployment_data = self.target_map[data]
-        else:
-            image_file = download_image(image, self.context, self.scratch_dir)
-            (boot_tgz, root_tgz, data) = self._generate_tarballs(image_file)
-
-        self._read_boot_cmds(boot_tgz=boot_tgz)
-        self._deploy_tarballs(boot_tgz, root_tgz)
-
-    def _deploy_tarballs(self, boot_tgz, root_tgz):
-        tmpdir = self.context.config.lava_image_tmpdir
-        url = self.context.config.lava_image_url
-
-        boot_tarball = boot_tgz.replace(tmpdir, '')
-        root_tarball = root_tgz.replace(tmpdir, '')
-        boot_url = '/'.join(u.strip('/') for u in [url, boot_tarball])
-        root_url = '/'.join(u.strip('/') for u in [url, root_tarball])
-        with self._as_master() as master:
-            self._format_testpartition(master, 'ext4')
-            try:
-                _deploy_linaro_rootfs(master, root_url)
-                _deploy_linaro_bootfs(master, boot_url)
-            except:
-                logging.exception("Deployment failed")
-                raise CriticalError("Deployment failed")
-
-    def _rewrite_partition_number(self, matchobj):
-        """ Returns the partition number after rewriting it to
-        n + testboot_offset.
-        """
-        boot_device = str(self.config.boot_device)
-        testboot_offset = self.config.testboot_offset
-        partition = int(matchobj.group('partition')) + testboot_offset
-        return ' ' + boot_device + ':' + str(partition) + ' '
-
-    def _rewrite_boot_cmds(self, boot_cmds):
-        """
-        Returns boot_cmds list after rewriting things such as:
-
-        * partition number from n to n + testboot_offset
-        * root=LABEL=testrootfs instead of root=UUID=ab34-...
-        """
-        boot_cmds = re.sub(
-            r"root=UUID=\S+", "root=LABEL=testrootfs", boot_cmds, re.MULTILINE)
-
-        pattern = "\s+\d+:(?P<partition>\d+)\s+"
-        boot_cmds = re.sub(
-            pattern, self._rewrite_partition_number, boot_cmds, re.MULTILINE)
-
-        return boot_cmds.split('\n')
-
-    def _read_boot_cmds(self, image=None, boot_tgz=None):
-        boot_file_path = None
-
-        if not self.config.read_boot_cmds_from_image:
-            return
-
-        # If we have already obtained boot commands dynamically, then return.
-        if self.deployment_data.get('boot_cmds_dynamic', False):
-            logging.debug("We already have boot commands in place.")
-            return
-
-        if image:
-            boot_part = self.config.boot_part
-            # Read boot related file from the boot partition of image.
-            with image_partition_mounted(image, boot_part) as mnt:
-                for boot_file in self.config.boot_files:
-                    boot_path = os.path.join(mnt, boot_file)
-                    if os.path.exists(boot_path):
-                        boot_file_path = boot_path
-                        break
-
-        elif boot_tgz:
-            tmp_dir = mkdtemp()
-            extracted_files = extract_targz(boot_tgz, tmp_dir)
-            for boot_file in self.config.boot_files:
-                for file_path in extracted_files:
-                    if boot_file == os.path.basename(file_path):
-                        boot_file_path = file_path
-                        break
-
-        if boot_file_path and os.path.exists(boot_file_path):
-            with open(boot_file_path, 'r') as f:
-                boot_cmds = self._rewrite_boot_cmds(f.read())
-                self.deployment_data['boot_cmds_dynamic'] = boot_cmds
-        else:
-            logging.debug("Unable to read boot commands dynamically.")
-
-    def _format_testpartition(self, runner, fstype):
-        logging.info("Format testboot and testrootfs partitions")
-        runner.run('umount /dev/disk/by-label/testrootfs', failok=True)
-        runner.run('nice mkfs -t %s -q /dev/disk/by-label/testrootfs -L testrootfs'
-                   % fstype, timeout=1800)
-        runner.run('umount /dev/disk/by-label/testboot', failok=True)
-        runner.run('nice mkfs.vfat /dev/disk/by-label/testboot -n testboot')
-
-    def _generate_tarballs(self, image_file):
-        self._customize_linux(image_file)
-        self._read_boot_cmds(image=image_file)
-        boot_tgz = os.path.join(self.scratch_dir, "boot.tgz")
-        root_tgz = os.path.join(self.scratch_dir, "root.tgz")
-        try:
-            _extract_partition(image_file, self.config.boot_part, boot_tgz)
-            _extract_partition(image_file, self.config.root_part, root_tgz)
-        except:
-            logging.exception("Failed to generate tarballs")
-            raise
-
-        # we need to associate the deployment data with these so that we
-        # can provide the proper boot_cmds later on in the job
-        data = self.deployment_data['data_type']
-        return boot_tgz, root_tgz, data
-
-    def target_extract(self, runner, tar_url, dest, timeout=-1, num_retry=5):
-        decompression_char = ''
-        if tar_url.endswith('.gz') or tar_url.endswith('.tgz'):
-            decompression_char = 'z'
-        elif tar_url.endswith('.bz2'):
-            decompression_char = 'j'
-        else:
-            raise RuntimeError('bad file extension: %s' % tar_url)
-
-        while num_retry > 0:
-            try:
-                runner.run(
-                    'wget --no-check-certificate --no-proxy '
-                    '--connect-timeout=30 -S --progress=dot -e dotbytes=2M '
-                    '-O- %s | '
-                    'tar --warning=no-timestamp --numeric-owner -C %s -x%sf -'
-                    % (tar_url, dest, decompression_char),
-                    timeout=timeout)
-                return
-            except (OperationFailed, pexpect.TIMEOUT):
-                logging.warning(("transfering %s failed. %d retry left."
-                                 % (tar_url, num_retry - 1)))
-
-            if num_retry > 1:
-                # send CTRL C in case wget still hasn't exited.
-                self.proc.sendcontrol("c")
-                self.proc.sendline(
-                    "echo 'retry left %s time(s)'" % (num_retry - 1))
-                # And wait a little while.
-                sleep_time = 60
-                logging.info("Wait %d second before retry" % sleep_time)
-                time.sleep(sleep_time)
-            num_retry -= 1
-
-        raise RuntimeError('extracting %s on target failed' % tar_url)
-
-    def get_partition(self, runner, partition):
-        if partition == self.config.boot_part:
-            partition = '/dev/disk/by-label/testboot'
-        elif partition == self.config.root_part:
-            partition = '/dev/disk/by-label/testrootfs'
-        elif partition == self.config.sdcard_part_android_org:
-            partition = '/dev/disk/by-label/sdcard'
-        elif partition == self.config.data_part_android_org:
-            lbl = _android_data_label(runner)
-            partition = '/dev/disk/by-label/%s' % lbl
-        else:
-            raise RuntimeError(
-                'unknown master image partition(%d)' % partition)
-        return partition
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-        logging.info('attempting to access master filesystem %r:%s' %
-                     (partition, directory))
-
-        assert directory != '/', "cannot mount entire partition"
-
-        with self._as_master() as runner:
-            partition = self.get_partition(runner, partition)
-            runner.run('mount %s /mnt' % partition)
-            try:
-                targetdir = os.path.join('/mnt/%s' % directory)
-                if not runner.is_file_exist(targetdir):
-                    runner.run('mkdir %s' % targetdir)
-
-                parent_dir, target_name = os.path.split(targetdir)
-
-                runner.run('nice tar -czf /tmp/fs.tgz -C %s %s' %
-                           (parent_dir, target_name))
-                runner.run('cd /tmp')  # need to be in same dir as fs.tgz
-                self.proc.sendline('python -m SimpleHTTPServer 0 2>/dev/null')
-                match_id = self.proc.expect([
-                    'Serving HTTP on 0.0.0.0 port (\d+) \.\.',
-                    pexpect.EOF, pexpect.TIMEOUT])
-                if match_id != 0:
-                    msg = "Unable to start HTTP server on master"
-                    logging.error(msg)
-                    raise CriticalError(msg)
-                port = self.proc.match.groups()[match_id]
-
-                url = "http://%s:%s/fs.tgz" % (self.master_ip, port)
-                tf = download_with_retry(
-                    self.context, self.scratch_dir, url, False)
-
-                tfdir = os.path.join(self.scratch_dir, str(time.time()))
-                try:
-                    os.mkdir(tfdir)
-                    self.context.run_command('nice tar -C %s -xzf %s' % (tfdir, tf))
-                    yield os.path.join(tfdir, target_name)
-
-                finally:
-                    tf = os.path.join(self.scratch_dir, 'fs.tgz')
-                    mk_targz(tf, tfdir)
-                    rmtree(tfdir)
-
-                    self.proc.sendcontrol('c')  # kill SimpleHTTPServer
-
-                    # get the last 2 parts of tf, ie "scratchdir/tf.tgz"
-                    tf = '/'.join(tf.split('/')[-2:])
-                    url = '%s/%s' % (self.context.config.lava_image_url, tf)
-                    runner.run('rm -rf %s' % targetdir)
-                    self.target_extract(runner, url, parent_dir)
-
-            finally:
-                    self.proc.sendcontrol('c')  # kill SimpleHTTPServer
-                    runner.run('umount /mnt')
-
-    def extract_tarball(self, tarball_url, partition, directory='/'):
-        logging.info('extracting %s to target' % tarball_url)
-
-        with self._as_master() as runner:
-            partition = self.get_partition(runner, partition)
-            runner.run('mount %s /mnt' % partition)
-            try:
-                self.target_extract(runner, tarball_url, '/mnt/%s' % directory)
-            finally:
-                runner.run('umount /mnt')
-
-    def _wait_for_master_boot(self):
-        self.proc.expect(self.config.image_boot_msg, timeout=300)
-        self._wait_for_prompt(self.proc, self.config.master_str, timeout=300)
-
-    def boot_master_image(self):
-        """
-        reboot the system, and check that we are in a master shell
-        """
-        boot_attempts = self.config.boot_retries
-        attempts = 0
-        in_master_image = False
-        while (attempts < boot_attempts) and (not in_master_image):
-            logging.info("Booting the system master image. Attempt: %d" %
-                         (attempts + 1))
-            try:
-                self._soft_reboot()
-                self._wait_for_master_boot()
-            except (OperationFailed, pexpect.TIMEOUT) as e:
-                logging.info("Soft reboot failed: %s" % e)
-                try:
-                    self._hard_reboot()
-                    self._wait_for_master_boot()
-                except (OperationFailed, pexpect.TIMEOUT) as e:
-                    msg = "Hard reboot into master image failed: %s" % e
-                    logging.warning(msg)
-                    attempts += 1
-                    continue
-
-            try:
-                self.proc.sendline('export PS1="%s"' % self.MASTER_PS1)
-                self.proc.expect(
-                    self.MASTER_PS1_PATTERN, timeout=120, lava_no_logging=1)
-            except pexpect.TIMEOUT as e:
-                msg = "Failed to get command line prompt: " % e
-                logging.warning(msg)
-                attempts += 1
-                continue
-
-            runner = MasterCommandRunner(self)
-            try:
-                self.master_ip = runner.get_target_ip()
-                self.device_version = runner.get_device_version()
-            except NetworkError as e:
-                msg = "Failed to get network up: " % e
-                logging.warning(msg)
-                attempts += 1
-                continue
-
-            lava_proxy = self.context.config.lava_proxy
-            if lava_proxy:
-                logging.info("Setting up http proxy")
-                runner.run("export http_proxy=%s" % lava_proxy, timeout=30)
-            logging.info("System is in master image now")
-            in_master_image = True
-
-        if not in_master_image:
-            msg = "Could not get master image booted properly"
-            logging.critical(msg)
-            raise CriticalError(msg)
-
-    @contextlib.contextmanager
-    def _as_master(self):
-        """A session that can be used to run commands in the master image."""
-        self.proc.sendline("")
-        match_id = self.proc.expect(
-            [self.MASTER_PS1_PATTERN, pexpect.TIMEOUT],
-            timeout=10, lava_no_logging=1)
-        if match_id == 1:
-            self.boot_master_image()
-        yield MasterCommandRunner(self)
-
-    def _soft_reboot(self):
-        logging.info("Perform soft reboot the system")
-        self.master_ip = None
-        # Try to C-c the running process, if any.
-        self.proc.sendcontrol('c')
-        self.proc.sendline(self.config.soft_boot_cmd)
-        # Looking for reboot messages or if they are missing, the U-Boot
-        # message will also indicate the reboot is done.
-        match_id = self.proc.expect(
-            [pexpect.TIMEOUT, 'Restarting system.',
-             'The system is going down for reboot NOW',
-             'Will now restart', 'U-Boot'], timeout=120)
-        if match_id == 0:
-            raise OperationFailed("Soft reboot failed")
-
-    def _hard_reboot(self):
-        logging.info("Perform hard reset on the system")
-        self.master_ip = None
-        if self.config.hard_reset_command != "":
-            self.context.run_command(self.config.hard_reset_command)
-        else:
-            self.proc.send("~$")
-            self.proc.sendline("hardreset")
-            self.proc.empty_buffer()
-
-    def _boot_linaro_image(self):
-        boot_cmds_job_file = False
-        boot_cmds_boot_options = False
-        boot_cmds = self.deployment_data['boot_cmds']
-        options = boot_options.as_dict(self, defaults={'boot_cmds': boot_cmds})
-
-        boot_cmds_job_file = self._is_job_defined_boot_cmds(self.config.boot_cmds)
-
-        if 'boot_cmds' in options:
-            if options['boot_cmds'].value != 'boot_cmds':
-                boot_cmds_boot_options = True
-
-        # Interactive boot_cmds from the job file are a list.
-        # We check for them first, if they are present, we use
-        # them and ignore the other cases.
-        if boot_cmds_job_file:
-            logging.info('Overriding boot_cmds from job file')
-            boot_cmds_override = True
-            boot_cmds = self.config.boot_cmds
-        # If there were no interactive boot_cmds, next we check
-        # for boot_option overrides. If one exists, we use them
-        # and ignore all other cases.
-        elif boot_cmds_boot_options:
-            logging.info('Overriding boot_cmds from boot_options')
-            boot_cmds = options['boot_cmds'].value
-            logging.info('boot_option=%s' % boot_cmds)
-            boot_cmds = self.config.cp.get('__main__', boot_cmds)
-            boot_cmds = string_to_list(boot_cmds.encode('ascii'))
-        # No interactive or boot_option overrides are present,
-        # we prefer to get the boot_cmds for the image if they are
-        # present.
-        elif self.deployment_data.get('boot_cmds_dynamic'):
-            logging.info('Loading boot_cmds from image')
-            boot_cmds = self.deployment_data['boot_cmds_dynamic']
-        # This is the catch all case. Where we get the default boot_cmds
-        # from the deployment data.
-        else:            
-            logging.info('Loading boot_cmds from device configuration')
-            boot_cmds = self.config.cp.get('__main__', boot_cmds)
-            boot_cmds = string_to_list(boot_cmds.encode('ascii'))
-
-        logging.info('boot_cmds: %s', boot_cmds)
-
-        self._boot(boot_cmds)
-
-    def _boot(self, boot_cmds):
-        try:
-            self._soft_reboot()
-            self._enter_bootloader(self.proc)
-        except:
-            logging.exception("_enter_bootloader failed")
-            self._hard_reboot()
-            self._enter_bootloader(self.proc)
-        self._customize_bootloader(self.proc, boot_cmds)
-
-target_class = MasterImageTarget
-
-
-class MasterCommandRunner(NetworkCommandRunner):
-    """A CommandRunner to use when the board is booted into the master image.
-    """
-
-    def __init__(self, target):
-        super(MasterCommandRunner, self).__init__(
-            target, target.MASTER_PS1_PATTERN, prompt_str_includes_rc=True)
-
-    def get_device_version(self):
-        pattern = 'device_version=(\d+-\d+/\d+-\d+)'
-        self.run("echo \"device_version="
-                 "$(lava-master-image-info --master-image-hwpack "
-                 "| sed 's/[^0-9-]//g; s/^-\+//')"
-                 "/"
-                 "$(lava-master-image-info --master-image-rootfs "
-                 "| sed 's/[^0-9-]//g; s/^-\+//')"
-                 "\"",
-                 [pattern, pexpect.EOF, pexpect.TIMEOUT],
-                 timeout=5)
-
-        device_version = None
-        if self.match_id == 0:
-            device_version = self.match.group(1)
-            logging.debug('Master image version (hwpack/rootfs) is %s' % device_version)
-        else:
-            logging.warning('Could not determine image version!')
-
-        return device_version
-
-    def has_partition_with_label(self, label):
-        if not label:
-            return False
-
-        path = '/dev/disk/by-label/%s' % label
-        return self.is_file_exist(path)
-
-    def is_file_exist(self, path):
-        cmd = 'ls %s > /dev/null' % path
-        rc = self.run(cmd, failok=True)
-        if rc == 0:
-            return True
-        return False
-
-
-def _extract_partition(image, partno, tarfile):
-    """Mount a partition and produce a tarball of it
-
-    :param image: The image to mount
-    :param partno: The index of the partition in the image
-    :param tarfile: path and filename of the tgz to output
-    """
-    with image_partition_mounted(image, partno) as mntdir:
-        mk_targz(tarfile, mntdir, asroot=True)
-
-
-def _deploy_linaro_rootfs(session, rootfs):
-    logging.info("Deploying linaro image")
-    session.run('udevadm trigger')
-    session.run('mkdir -p /mnt/root')
-    session.run('mount /dev/disk/by-label/testrootfs /mnt/root')
-    # The timeout has to be this long for vexpress. For a full desktop it
-    # takes 214 minutes, plus about 25 minutes for the mkfs ext3, add
-    # another hour to err on the side of caution.
-    session._client.target_extract(session, rootfs, '/mnt/root', timeout=18000)
-
-    #DO NOT REMOVE - diverting flash-kernel and linking it to /bin/true
-    #prevents a serious problem where packages getting installed that
-    #call flash-kernel can update the kernel on the master image
-    if session.run('chroot /mnt/root which dpkg-divert', failok=True) == 0:
-        session.run(
-            'chroot /mnt/root dpkg-divert --local /usr/sbin/flash-kernel')
-    session.run(
-        'chroot /mnt/root ln -sf /bin/true /usr/sbin/flash-kernel')
-    session.run('umount /mnt/root')
-
-
-def _deploy_linaro_bootfs(session, bootfs):
-    logging.info("Deploying linaro bootfs")
-    session.run('udevadm trigger')
-    session.run('mkdir -p /mnt/boot')
-    session.run('mount /dev/disk/by-label/testboot /mnt/boot')
-    session._client.target_extract(session, bootfs, '/mnt/boot')
-    session.run('umount /mnt/boot')
-
-
-def _deploy_linaro_android_boot(session, boottbz2, target):
-    logging.info("Deploying test boot filesystem")
-    session.run('mkdir -p /mnt/lava/boot')
-    session.run('mount /dev/disk/by-label/testboot /mnt/lava/boot')
-    session._client.target_extract(session, boottbz2, '/mnt/lava')
-    _recreate_uInitrd(session, target)
-    session.run('umount /mnt/lava/boot')
-
-
-def _update_uInitrd_partitions(session, rc_filename):
-    # Original android sdcard partition layout by l-a-m-c
-    sys_part_org = session._client.config.sys_part_android_org
-    cache_part_org = session._client.config.cache_part_android_org
-    data_part_org = session._client.config.data_part_android_org
-    partition_padding_string_org = session._client.config.partition_padding_string_org
-
-    # Sdcard layout in Lava image
-    sys_part_lava = session._client.config.sys_part_android
-    data_part_lava = session._client.config.data_part_android
-    partition_padding_string_lava = session._client.config.partition_padding_string_android
-
-    blkorg = session._client.config.android_orig_block_device
-    blklava = session._client.config.android_lava_block_device
-
-    # delete use of cache partition
-    session.run('sed -i "/\/dev\/block\/%s%s%s/d" %s'
-                % (blkorg, partition_padding_string_org, cache_part_org, rc_filename))
-    session.run('sed -i "s/%s%s%s/%s%s%s/g" %s' % (blkorg, partition_padding_string_org, data_part_org, blklava,
-                                                   partition_padding_string_lava, data_part_lava, rc_filename))
-    session.run('sed -i "s/%s%s%s/%s%s%s/g" %s' % (blkorg, partition_padding_string_org, sys_part_org, blklava,
-                                                   partition_padding_string_lava, sys_part_lava, rc_filename))
-
-
-def _recreate_uInitrd(session, target):
-    logging.debug("Recreate uInitrd")
-
-    session.run('mkdir -p ~/tmp/')
-    session.run('mv /mnt/lava/boot/uInitrd ~/tmp')
-    session.run('cd ~/tmp/')
-
-    session.run('nice dd if=uInitrd of=uInitrd.data ibs=64 skip=1')
-    session.run('mv uInitrd.data ramdisk.cpio.gz')
-    session.run('nice gzip -d -f ramdisk.cpio.gz; cpio -i -F ramdisk.cpio')
-
-    session.run(
-        'sed -i "/export PATH/a \ \ \ \ export PS1 \'%s\'" init.rc' %
-        target.ANDROID_TESTER_PS1)
-
-    # The mount partitions have moved from init.rc to init.partitions.rc
-    # For backward compatible with early android build, we update both rc files
-    # For omapzoom and aosp and JB4.2 the operation for mounting partitions are
-    # in init.omap4pandaboard.rc and fstab.* files
-    possible_partitions_files = session._client.config.possible_partitions_files
-
-    for f in possible_partitions_files:
-        if session.is_file_exist(f):
-            _update_uInitrd_partitions(session, f)
-            session.run("cat %s" % f, failok=True)
-
-    session.run('nice cpio -i -t -F ramdisk.cpio | cpio -o -H newc | \
-            gzip > ramdisk_new.cpio.gz')
-
-    session.run(
-        'nice mkimage -A arm -O linux -T ramdisk -n "Android Ramdisk Image" \
-            -d ramdisk_new.cpio.gz uInitrd')
-
-    session.run('cd -')
-    session.run('mv ~/tmp/uInitrd /mnt/lava/boot/uInitrd')
-    session.run('rm -rf ~/tmp')
-
-
-def _deploy_linaro_android_system(session, systemtbz2):
-    logging.info("Deploying the system filesystem")
-    target = session._client
-
-    session.run('mkdir -p /mnt/lava/system')
-    session.run('mount /dev/disk/by-label/testrootfs /mnt/lava/system')
-    # Timeout has to be this long because of older vexpress motherboards
-    # being somewhat slower
-    session._client.target_extract(
-        session, systemtbz2, '/mnt/lava', timeout=3600)
-
-    if session.has_partition_with_label('userdata') and \
-       session.has_partition_with_label('sdcard') and \
-       session.is_file_exist('/mnt/lava/system/etc/vold.fstab'):
-        # If there is no userdata partition on the sdcard(like iMX and Origen),
-        # then the sdcard partition will be used as the userdata partition as
-        # before, and so cannot be used here as the sdcard on android
-        original = 'dev_mount sdcard %s %s ' % (
-            target.config.sdcard_mountpoint_path,
-            target.config.sdcard_part_android_org)
-        replacement = 'dev_mount sdcard %s %s ' % (
-            target.config.sdcard_mountpoint_path,
-            target.config.sdcard_part_android)
-        sed_cmd = "s@{original}@{replacement}@".format(original=original,
-                                                       replacement=replacement)
-        session.run(
-            'sed -i "%s" /mnt/lava/system/etc/vold.fstab' % sed_cmd,
-            failok=True)
-        session.run("cat /mnt/lava/system/etc/vold.fstab", failok=True)
-
-    script_path = '%s/%s' % ('/mnt/lava', '/system/bin/disablesuspend.sh')
-    if not session.is_file_exist(script_path):
-        session.run("sh -c 'export http_proxy=%s'" %
-                    target.context.config.lava_proxy)
-        session.run('wget --no-check-certificate %s -O %s' %
-                    (target.config.git_url_disablesuspend_sh, script_path))
-        session.run('chmod +x %s' % script_path)
-        session.run('chown :2000 %s' % script_path)
-
-    session.run(
-        ('sed -i "s/^PS1=.*$/PS1=\'%s\'/" '
-         '/mnt/lava/system/etc/mkshrc') % target.ANDROID_TESTER_PS1,
-        failok=True)
-
-    session.run('umount /mnt/lava/system')
-
-
-def _purge_linaro_android_sdcard(session):
-    logging.info("Reformatting Linaro Android sdcard filesystem")
-    session.run('nice mkfs.vfat /dev/disk/by-label/sdcard -n sdcard')
-    session.run('udevadm trigger')
-
-
-def _android_data_label(session):
-    data_label = 'userdata'
-    if not session.has_partition_with_label(data_label):
-        #consider the compatiblity, here use the existed sdcard partition
-        data_label = 'sdcard'
-    return data_label
-
-
-def _deploy_linaro_android_data(session, datatbz2):
-    data_label = _android_data_label(session)
-    session.run('umount /dev/disk/by-label/%s' % data_label, failok=True)
-    session.run('nice mkfs.ext4 -q /dev/disk/by-label/%s -L %s' %
-                (data_label, data_label))
-    session.run('udevadm trigger')
-    session.run('mkdir -p /mnt/lava/data')
-    session.run('mount /dev/disk/by-label/%s /mnt/lava/data' % data_label)
-    session._client.target_extract(session, datatbz2, '/mnt/lava', timeout=600)
-    session.run('umount /mnt/lava/data')

=== removed file 'lava_dispatcher/device/nexus10.py'
--- lava_dispatcher/device/nexus10.py	2013-07-16 16:03:44 +0000
+++ lava_dispatcher/device/nexus10.py	1970-01-01 00:00:00 +0000
@@ -1,68 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Tyler Baker <Tyler.Baker@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from lava_dispatcher.device.target import (
-    Target
-)
-from lava_dispatcher.errors import (
-    CriticalError,
-)
-from lava_dispatcher.device.fastboot import (
-    FastbootTarget
-)
-
-
-class Nexus10Target(FastbootTarget):
-
-    def __init__(self, context, config):
-        super(Nexus10Target, self).__init__(context, config)
-
-    def deploy_android(self, boot, system, userdata):
-
-        boot = self._get_image(boot)
-        system = self._get_image(system)
-        userdata = self._get_image(userdata)
-
-        self.fastboot.enter()
-        self.fastboot.flash('boot', boot)
-        self.fastboot.flash('system', system)
-        self.fastboot.flash('userdata', userdata)
-
-        self.deployment_data = Target.android_deployment_data
-        self.deployment_data['boot_image'] = boot
-
-    def power_on(self):
-        if not self.deployment_data.get('boot_image', False):
-            raise CriticalError('Deploy action must be run first')
-
-        self.fastboot.enter()
-        self.fastboot('reboot')
-
-        self._adb('wait-for-device')
-
-        self._booted = True
-        proc = self._adb('shell', spawn=True)
-        proc.sendline("")  # required to put the adb shell in a reasonable state
-        proc.sendline("export PS1='%s'" % self.deployment_data['TESTER_PS1'])
-        self._runner = self._get_runner(proc)
-
-        return proc
-
-target_class = Nexus10Target

=== removed file 'lava_dispatcher/device/qemu.py'
--- lava_dispatcher/device/qemu.py	2013-08-30 22:15:05 +0000
+++ lava_dispatcher/device/qemu.py	1970-01-01 00:00:00 +0000
@@ -1,132 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import contextlib
-import logging
-import subprocess
-import re
-
-from lava_dispatcher.device.target import (
-    Target
-)
-from lava_dispatcher.client.lmc_utils import (
-    generate_image,
-    image_partition_mounted,
-)
-from lava_dispatcher.downloader import (
-    download_image,
-)
-from lava_dispatcher.utils import (
-    ensure_directory,
-    extract_targz,
-    finalize_process,
-)
-from lava_dispatcher.errors import (
-    CriticalError
-)
-
-
-class QEMUTarget(Target):
-
-    def __init__(self, context, config):
-        super(QEMUTarget, self).__init__(context, config)
-        self._qemu_options = None
-        self._sd_image = None
-
-    def deploy_linaro_kernel(self, kernel, ramdisk, dtb, rootfs, bootloader,
-                             firmware, rootfstype, bootloadertype):
-        if rootfs is not None:
-            self._sd_image = download_image(rootfs, self.context)
-            self._customize_linux(self._sd_image)
-            self.append_qemu_options(self.config.qemu_options.format(
-                DISK_IMAGE=self._sd_image))
-            kernel_args = 'root=/dev/sda1'
-        else:
-            raise CriticalError("You must specify a QEMU file system image")
-
-        if kernel is not None:
-            kernel = download_image(kernel, self.context)
-            self.append_qemu_options(' -kernel %s' % kernel)
-            kernel_args += ' console=ttyS0,115200'
-            if ramdisk is not None:
-                ramdisk = download_image(ramdisk, self.context)
-                self.append_qemu_options(' -initrd %s' % ramdisk)
-            if dtb is not None:
-                dtb = download_image(dtb, self.context)
-                self.append_qemu_options(' -dtb %s' % ramdisk)
-            if firmware is not None:
-                firmware = download_image(firmware, self.context)
-                self.append_qemu_options(' -bios %s' % firmware)
-            self.append_qemu_options(' -append "%s"' % kernel_args)
-        else:
-            raise CriticalError("No kernel images to boot")
-
-    def deploy_linaro(self, hwpack=None, rootfs=None, bootloadertype='u_boot'):
-        odir = self.scratch_dir
-        self._sd_image = generate_image(self, hwpack, rootfs, odir, bootloadertype)
-        self._customize_linux(self._sd_image)
-        self.append_qemu_options(self.config.qemu_options.format(
-            DISK_IMAGE=self._sd_image))
-
-    def deploy_linaro_prebuilt(self, image, bootloadertype='u_boot'):
-        self._sd_image = download_image(image, self.context)
-        self._customize_linux(self._sd_image)
-        self.append_qemu_options(self.config.qemu_options.format(
-            DISK_IMAGE=self._sd_image))
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-        with image_partition_mounted(self._sd_image, partition) as mntdir:
-            path = '%s/%s' % (mntdir, directory)
-            ensure_directory(path)
-            yield path
-
-    def extract_tarball(self, tarball_url, partition, directory='/'):
-        logging.info('extracting %s to target' % tarball_url)
-
-        with image_partition_mounted(self._sd_image, partition) as mntdir:
-            tb = download_image(tarball_url, self.context, decompress=False)
-            extract_targz(tb, '%s/%s' % (mntdir, directory))
-
-    def power_on(self):
-        qemu_cmd = '%s %s' % (self.config.qemu_binary, self._qemu_options)
-        logging.info('launching qemu with command %r' % qemu_cmd)
-        proc = self.context.spawn(qemu_cmd, timeout=1200)
-        return proc
-
-    def power_off(self, proc):
-        finalize_process(proc)
-
-    def get_device_version(self):
-        try:
-            output = subprocess.check_output(
-                [self.config.qemu_binary, '--version'])
-            matches = re.findall('[0-9]+\.[0-9a-z.+\-:~]+', output)
-            return matches[-1]
-        except subprocess.CalledProcessError:
-            return "unknown"
-
-    def append_qemu_options(self, parameter):
-        if self._qemu_options is None:
-            self._qemu_options = parameter
-        else:
-            self._qemu_options += parameter
-
-target_class = QEMUTarget

=== removed file 'lava_dispatcher/device/sdmux.py'
--- lava_dispatcher/device/sdmux.py	2013-09-11 15:14:22 +0000
+++ lava_dispatcher/device/sdmux.py	1970-01-01 00:00:00 +0000
@@ -1,237 +0,0 @@ 
-# Copyright (C) 2012-2013 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#         Dave Pigott <dave.pigott@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import contextlib
-import logging
-import os
-import glob
-import subprocess
-import time
-import lava_dispatcher.actions.lmp.sdmux as sdmux
-
-from lava_dispatcher.errors import (
-    CriticalError,
-)
-from lava_dispatcher.device.master import (
-    MasterImageTarget
-)
-from lava_dispatcher.client.lmc_utils import (
-    generate_android_image,
-    generate_image,
-    image_partition_mounted,
-)
-from lava_dispatcher.downloader import (
-    download_image,
-)
-from lava_dispatcher.utils import (
-    connect_to_serial,
-    ensure_directory,
-    extract_targz,
-)
-
-
-def _flush_files(mntdir):
-    """
-    calls to umount can fail because the files haven't completely been written
-    to disk. This helps make sure that happens and eliminates a warning
-    """
-    for f in os.listdir('/proc/self/fd'):
-        # check for existances since listdir will include an fd for itself
-        if os.path.exists(f):
-            path = os.path.realpath('/proc/self/fd/%s' % f)
-            if path.startswith(mntdir):
-                os.fsync(int(f))
-                os.close(int(f))
-
-
-class SDMuxTarget(MasterImageTarget):
-    """
-    This adds support for the "sd mux" device. An SD-MUX device is a piece of
-    hardware that allows the host and target to both connect to the same SD
-    card. The control of the SD card can then be toggled between the target
-    and host via software.
-"""
-
-    def __init__(self, context, config):
-        super(SDMuxTarget, self).__init__(context, config)
-
-        self.proc = None
-
-        if not config.sdmux_usb_id:
-            raise CriticalError('Device config requires "sdmux_usb_id"')
-
-        if not config.sdmux_id:
-            raise CriticalError('Device config requires "sdmux_id"')
-
-        if config.pre_connect_command:
-            self.context.run_command(config.pre_connect_command)
-
-    def deploy_linaro(self, hwpack=None, rootfs=None, bootloadertype=None):
-        img = generate_image(self, hwpack, rootfs, self.scratch_dir)
-        self._customize_linux(img)
-        self._write_image(img)
-
-    def deploy_linaro_prebuilt(self, image, bootloadertype=None):
-        img = download_image(image, self.context)
-        self._customize_linux(img)
-        self._write_image(img)
-
-    def _customize_android(self, img):
-        sys_part = self.config.sys_part_android_org
-        with image_partition_mounted(img, sys_part) as d:
-            with open('%s/etc/mkshrc' % d, 'a') as f:
-                f.write('\n# LAVA CUSTOMIZATIONS\n')
-                f.write('PS1="%s"\n' % self.ANDROID_TESTER_PS1)
-        self.deployment_data = MasterImageTarget.android_deployment_data
-
-    def deploy_android(self, boot, system, data):
-        scratch = self.scratch_dir
-        boot = download_image(boot, self.context, scratch, decompress=False)
-        data = download_image(data, self.context, scratch, decompress=False)
-        system = download_image(system, self.context, scratch, decompress=False)
-
-        img = os.path.join(scratch, 'android.img')
-        device_type = self.config.lmc_dev_arg
-        generate_android_image(self.context, device_type, boot, data, system, img)
-        self._customize_android(img)
-        self._write_image(img)
-
-    def _write_image(self, image):
-        sdmux.dut_disconnect(self.config.sdmux_id)
-        sdmux.host_usda(self.config.sdmux_id)
-
-        with self.mux_device() as device:
-            logging.info("dd'ing image to device (%s)", device)
-            dd_cmd = 'dd if=%s of=%s bs=4096 conv=fsync' % (image, device)
-            dd_proc = subprocess.Popen(dd_cmd, shell=True)
-            dd_proc.wait()
-            if dd_proc.returncode != 0:
-                raise CriticalError("Failed to dd image to device (Error code %d)" % dd_proc.returncode)
-
-        sdmux.host_disconnect(self.config.sdmux_id)
-
-    @contextlib.contextmanager
-    def mux_device(self):
-        """
-        This function gives us a safe context in which to deal with the
-        raw sdmux device. It will ensure that:
-          * the target is powered off
-          * the proper sdmux USB device is powered on
-
-        It will then yield to the caller a dev entry like /dev/sdb
-        This entry can be used safely during this context. Upon exiting,
-        the USB device connect to the sdmux will be powered off so that the
-        target will be able to safely access it.
-        """
-
-        self.proc = None
-
-        syspath = "/sys/bus/usb/devices/" + self.config.sdmux_usb_id + \
-            "/" + self.config.sdmux_usb_id + \
-            "*/host*/target*/*:0:0:0/block/*"
-
-        retrycount = 0
-        deventry = ""
-
-        while retrycount < self.config.sdmux_mount_retry_seconds:
-            device_list = glob.glob(syspath)
-            for device in device_list:
-                deventry = os.path.join("/dev/", os.path.basename(device))
-                break
-            if deventry != "":
-                break
-            time.sleep(1)
-            retrycount += 1
-
-        if deventry != "":
-            logging.debug('found sdmux device %s: Waiting %ds for any mounts to complete'
-                          % (deventry, self.config.sdmux_mount_wait_seconds))
-            time.sleep(self.config.sdmux_mount_wait_seconds)
-            logging.debug("Unmounting %s*", deventry)
-            os.system("umount %s*" % deventry)
-            logging.debug('returning sdmux device as: %s', deventry)
-            yield deventry
-        else:
-            raise CriticalError('Unable to access sdmux device')
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-        """
-        This works in cojunction with the "mux_device" function to safely
-        access a partition/directory on the sdmux filesystem
-        """
-        mntdir = os.path.join(self.scratch_dir, 'sdmux_mnt')
-        if not os.path.exists(mntdir):
-            os.mkdir(mntdir)
-
-        with self.mux_device() as device:
-            device = '%s%s' % (device, partition)
-            try:
-                self.context.run_command(['mount', device, mntdir], failok=False)
-                if directory[0] == '/':
-                    directory = directory[1:]
-                path = os.path.join(mntdir, directory)
-                ensure_directory(path)
-                logging.info('sdmux(%s) mounted at: %s', device, path)
-                yield path
-            except CriticalError:
-                raise
-            except subprocess.CalledProcessError:
-                raise CriticalError('Unable to access sdmux device')
-            except:
-                logging.exception('Error accessing sdmux filesystem')
-                raise CriticalError('Error accessing sdmux filesystem')
-            finally:
-                logging.info('unmounting sdmux')
-                try:
-                    _flush_files(mntdir)
-                    self.context.run_command(['umount', device], failok=False)
-                except subprocess.CalledProcessError:
-                    logging.exception('umount failed, re-try in 10 seconds')
-                    time.sleep(10)
-                    if self.context.run_command(['umount', device]) != 0:
-                        logging.error(
-                            'Unable to unmount sdmux device %s', device)
-
-    def extract_tarball(self, tarball_url, partition, directory='/'):
-        logging.info('extracting %s to target', tarball_url)
-        with self.file_system(partition, directory) as mntdir:
-            tb = download_image(tarball_url, self.context, decompress=False)
-            extract_targz(tb, '%s/%s' % (mntdir, directory))
-
-    def power_off(self, proc):
-        super(SDMuxTarget, self).power_off(proc)
-        self.context.run_command(self.config.power_off_cmd)
-        sdmux.dut_disconnect(self.config.sdmux_id)
-
-    def power_on(self):
-        sdmux.host_disconnect(self.config.sdmux_id)
-        sdmux.dut_usda(self.config.sdmux_id)
-
-        logging.info('powering on')
-        self.context.run_command(self.config.power_on_cmd)
-
-        return self.proc
-
-    def get_device_version(self):
-        return self.config.sdmux_version
-
-target_class = SDMuxTarget

=== removed file 'lava_dispatcher/device/target.py'
--- lava_dispatcher/device/target.py	2013-09-10 16:30:04 +0000
+++ lava_dispatcher/device/target.py	1970-01-01 00:00:00 +0000
@@ -1,288 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import contextlib
-import os
-import shutil
-import re
-
-from lava_dispatcher.client.base import (
-    wait_for_prompt
-)
-from lava_dispatcher.client.lmc_utils import (
-    image_partition_mounted
-)
-import lava_dispatcher.utils as utils
-
-
-def get_target(context, device_config):
-    ipath = 'lava_dispatcher.device.%s' % device_config.client_type
-    m = __import__(ipath, fromlist=[ipath])
-    return m.target_class(context, device_config)
-
-
-class Target(object):
-    """ Defines the contract needed by the dispatcher for dealing with a
-    target device
-    """
-
-    ANDROID_TESTER_PS1 = "root@linaro# "
-
-    # The target deployment functions will point self.deployment_data to
-    # the appropriate dictionary below. Code such as actions can contribute
-    # to these structures with special handling logic
-    android_deployment_data = {
-        'TESTER_PS1': ANDROID_TESTER_PS1,
-        'TESTER_PS1_PATTERN': ANDROID_TESTER_PS1,
-        'TESTER_PS1_INCLUDES_RC': False,
-    }
-    ubuntu_deployment_data = {
-        'TESTER_PS1': "linaro-test [rc=$(echo \$?)]# ",
-        'TESTER_PS1_PATTERN': "linaro-test \[rc=(\d+)\]# ",
-        'TESTER_PS1_INCLUDES_RC': True,
-    }
-    oe_deployment_data = {
-        'TESTER_PS1': "linaro-test [rc=$(echo \$?)]# ",
-        'TESTER_PS1_PATTERN': "linaro-test \[rc=(\d+)\]# ",
-        'TESTER_PS1_INCLUDES_RC': True,
-    }
-    fedora_deployment_data = {
-        'TESTER_PS1': "linaro-test [rc=$(echo \$?)]# ",
-        'TESTER_PS1_PATTERN': "linaro-test \[rc=(\d+)\]# ",
-        'TESTER_PS1_INCLUDES_RC': True,
-    }
-
-    def __init__(self, context, device_config):
-        self.context = context
-        self.config = device_config
-        self.boot_options = []
-        self._scratch_dir = None
-        self.deployment_data = {}
-
-    @property
-    def scratch_dir(self):
-        if self._scratch_dir is None:
-            self._scratch_dir = utils.mkdtemp(
-                self.context.config.lava_image_tmpdir)
-        return self._scratch_dir
-
-    def power_on(self):
-        """ responsible for powering on the target device and returning an
-        instance of a pexpect session
-        """
-        raise NotImplementedError('power_on')
-
-    def deploy_linaro(self, hwpack, rfs, bootloadertype):
-        raise NotImplementedError('deploy_image')
-
-    def deploy_android(self, boot, system, userdata):
-        raise NotImplementedError('deploy_android_image')
-
-    def deploy_linaro_prebuilt(self, image, bootloadertype):
-        raise NotImplementedError('deploy_linaro_prebuilt')
-
-    def power_off(self, proc):
-        if proc is not None:
-            proc.close()
-
-    @contextlib.contextmanager
-    def file_system(self, partition, directory):
-        """ Allows the caller to interact directly with a directory on
-        the target. This method yields a directory where the caller can
-        interact from. Upon the exit of this context, the changes will be
-        applied to the target.
-
-        The partition parameter refers to partition number the directory
-        would reside in as created by linaro-media-create. ie - the boot
-        partition would be 1. In the case of something like the master
-        image, the target implementation must map this number to the actual
-        partition its using.
-
-        NOTE: due to difference in target implementations, the caller should
-        try and interact with the smallest directory locations possible.
-        """
-        raise NotImplementedError('file_system')
-
-    def extract_tarball(self, tarball_url, partition, directory='/'):
-        """ This is similar to the file_system API but is optimized for the
-        scenario when you just need explode a potentially large tarball on
-        the target device. The file_system API isn't really suitable for this
-        when thinking about an implementation like master.py
-        """
-        raise NotImplementedError('extract_tarball')
-
-    @contextlib.contextmanager
-    def runner(self):
-        """ Powers on the target, returning a CommandRunner object and will
-        power off the target when the context is exited
-        """
-        proc = runner = None
-        try:
-            proc = self.power_on()
-            runner = self._get_runner(proc)
-            yield runner
-        finally:
-            if proc and runner:
-                pass
-
-    def _get_runner(self, proc):
-        from lava_dispatcher.client.base import CommandRunner
-        pat = self.deployment_data['TESTER_PS1_PATTERN']
-        incrc = self.deployment_data['TESTER_PS1_INCLUDES_RC']
-        return CommandRunner(proc, pat, incrc)
-
-    def get_test_data_attachments(self):
-        return []
-
-    def get_device_version(self):
-        """ Returns the device version associated with the device, i.e. version
-        of emulation software, or version of master image. Must be overriden in
-        subclasses.
-        """
-        return 'unknown'
-
-    def _find_and_copy(self, rootdir, odir, pattern, name=None):
-        dest = None
-        for root, dirs, files in os.walk(rootdir):
-            for file_name in files:
-                if re.match(pattern, file_name):
-                    if name:
-                        dest = os.path.join(odir, name)
-                    else:
-                        dest = os.path.join(odir, file_name)
-                    if rootdir != odir:
-                        src = os.path.join(root, file_name)
-                        shutil.copyfile(src, dest)
-                        return dest
-                    else:
-                        return dest
-        return dest
-
-    def _wait_for_prompt(self, connection, prompt_pattern, timeout):
-        wait_for_prompt(connection, prompt_pattern, timeout)
-
-    def _is_job_defined_boot_cmds(self, boot_cmds):
-        if isinstance(self.config.boot_cmds, basestring):
-            return False
-        else:
-            return True
-
-    def _enter_bootloader(self, connection):
-        if connection.expect(self.config.interrupt_boot_prompt) != 0:
-            raise Exception("Failed to enter bootloader")
-        connection.sendline(self.config.interrupt_boot_command)
-
-    def _customize_bootloader(self, connection, boot_cmds):
-        for line in boot_cmds:
-            parts = re.match('^(?P<action>sendline|expect)\s*(?P<command>.*)',
-                             line)
-            if parts:
-                try:
-                    action = parts.group('action')
-                    command = parts.group('command')
-                except AttributeError as e:
-                    raise Exception("Badly formatted command in \
-                                      boot_cmds %s" % e)
-                if action == "sendline":
-                    connection.send(command)
-                    connection.sendline('')
-                elif action == "expect":
-                    command = re.escape(command)
-                    connection.expect(command, timeout=300)
-            else:
-                self._wait_for_prompt(connection,
-                                      self.config.bootloader_prompt,
-                                      timeout=300)
-                connection.sendline(line)
-
-    def _target_extract(self, runner, tar_file, dest, timeout=-1):
-        tmpdir = self.context.config.lava_image_tmpdir
-        url = self.context.config.lava_image_url
-        tar_file = tar_file.replace(tmpdir, '')
-        tar_url = '/'.join(u.strip('/') for u in [url, tar_file])
-        self._target_extract_url(runner, tar_url, dest, timeout=timeout)
-
-    def _target_extract_url(self, runner, tar_url, dest, timeout=-1):
-        decompression_cmd = ''
-        if tar_url.endswith('.gz') or tar_url.endswith('.tgz'):
-            decompression_cmd = '| /bin/gzip -dc'
-        elif tar_url.endswith('.bz2'):
-            decompression_cmd = '| /bin/bzip2 -dc'
-        elif tar_url.endswith('.tar'):
-            decompression_cmd = ''
-        else:
-            raise RuntimeError('bad file extension: %s' % tar_url)
-
-        runner.run('wget -O - %s %s | /bin/tar -C %s -xmf -'
-                   % (tar_url, decompression_cmd, dest),
-                   timeout=timeout)
-
-    def _start_busybox_http_server(self, runner, ip):
-        runner.run('busybox httpd -f &')
-        runner.run('echo $! > /tmp/httpd.pid')
-        url_base = "http://%s" % ip
-        return url_base
-
-    def _stop_busybox_http_server(self, runner):
-        runner.run('kill `cat /tmp/httpd.pid`')
-
-    def _customize_ubuntu(self, rootdir):
-        self.deployment_data = Target.ubuntu_deployment_data
-        with open('%s/root/.bashrc' % rootdir, 'a') as f:
-            f.write('export PS1="%s"\n' % self.deployment_data['TESTER_PS1'])
-        with open('%s/etc/hostname' % rootdir, 'w') as f:
-            f.write('%s\n' % self.config.hostname)
-
-    def _customize_oe(self, rootdir):
-        self.deployment_data = Target.oe_deployment_data
-        with open('%s/etc/profile' % rootdir, 'a') as f:
-            f.write('export PS1="%s"\n' % self.deployment_data['TESTER_PS1'])
-        with open('%s/etc/hostname' % rootdir, 'w') as f:
-            f.write('%s\n' % self.config.hostname)
-
-    def _customize_fedora(self, rootdir):
-        self.deployment_data = Target.fedora_deployment_data
-        with open('%s/etc/profile' % rootdir, 'a') as f:
-            f.write('export PS1="%s"\n' % self.deployment_data['TESTER_PS1'])
-        with open('%s/etc/hostname' % rootdir, 'w') as f:
-            f.write('%s\n' % self.config.hostname)
-
-    def _customize_linux(self, image):
-        root_part = self.config.root_part
-        os_release_id = 'linux'
-        with image_partition_mounted(image, root_part) as mnt:
-            os_release_file = '%s/etc/os-release' % mnt
-            if os.path.exists(os_release_file):
-                for line in open(os_release_file):
-                    if line.startswith('ID='):
-                        os_release_id = line[(len('ID=')):]
-                        os_release_id = os_release_id.strip('\"\n')
-                        break
-
-            if os_release_id == 'debian' or os_release_id == 'ubuntu' or \
-                    os.path.exists('%s/etc/debian_version' % mnt):
-                self._customize_ubuntu(mnt)
-            elif os_release_id == 'fedora':
-                self._customize_fedora(mnt)
-            else:
-                # assume an OE based image. This is actually pretty safe
-                # because we are doing pretty standard linux stuff, just
-                # just no upstart or dash assumptions
-                self._customize_oe(mnt)

=== removed file 'lava_dispatcher/device/vexpress.py'
--- lava_dispatcher/device/vexpress.py	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/device/vexpress.py	1970-01-01 00:00:00 +0000
@@ -1,194 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Author: Antonio Terceiro <antonio.terceiro@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import pexpect
-import os
-import logging
-from time import sleep
-from contextlib import contextmanager
-
-from lava_dispatcher.device.master import MasterImageTarget
-from lava_dispatcher.errors import CriticalError
-
-
-class VexpressTarget(MasterImageTarget):
-
-    def __init__(self, context, config):
-        super(VexpressTarget, self).__init__(context, config)
-
-        self.test_uefi = None
-
-        if (self.config.uefi_image_filename is None or
-                self.config.vexpress_uefi_path is None or
-                self.config.vexpress_uefi_backup_path is None or
-                self.config.vexpress_usb_mass_storage_device is None):
-
-            raise CriticalError(
-                "Versatile Express devices must specify all "
-                "of the following configuration variables: "
-                "uefi_image_filename, vexpress_uefi_path, "
-                "vexpress_uefi_backup_path, and "
-                "vexpress_usb_mass_storage_device")
-
-    ##################################################################
-    # methods inherited from MasterImageTarget and overriden here
-    ##################################################################
-
-    def _soft_reboot(self):
-        """
-        The Vexpress board only displays the prompt to interrupt the MCC when
-        it is power-cycled, so we must always do a hard reset in practice.
-
-        When a soft reboot is requested, though, at least we sync the disks
-        before sending the hard reset.
-        """
-        # Try to C-c the running process, if any
-        self.proc.sendcontrol('c')
-        # Flush file system buffers
-        self.proc.sendline('sync')
-
-        self._hard_reboot()
-
-    def _enter_bootloader(self, connection):
-        with self._mcc_setup() as mount_point:
-            self._install_test_uefi(mount_point)
-
-        super(VexpressTarget, self)._enter_bootloader(connection)
-
-    def _wait_for_master_boot(self):
-        with self._mcc_setup() as mount_point:
-            self._restore_uefi_backup(mount_point)
-
-        super(VexpressTarget, self)._wait_for_master_boot()
-
-    def _deploy_android_tarballs(self, master, boot, system, data):
-        super(VexpressTarget, self)._deploy_android_tarballs(master, boot,
-                                                             system, data)
-        # android images have boot files inside boot/ in the tarball
-        uefi_on_image = os.path.join('boot', self.config.uefi_image_filename)
-        self._extract_uefi_from_tarball(boot, uefi_on_image)
-
-    def _deploy_tarballs(self, boot_tgz, root_tgz):
-        super(VexpressTarget, self)._deploy_tarballs(boot_tgz, root_tgz)
-        uefi_on_image = self.config.uefi_image_filename
-        self._extract_uefi_from_tarball(boot_tgz, uefi_on_image)
-
-    ##################################################################
-    # implementation-specific methods
-    ##################################################################
-
-    @contextmanager
-    def _mcc_setup(self):
-        """
-        This method will manage the context for manipulating the USB mass
-        storage device, and pass the mount point where the USB MSD is mounted
-        to the inner block.
-
-        Example:
-
-            with self._mcc_setup() as mount_point:
-                do_stuff_with(mount_point)
-
-
-        This can be used for example to copy files from/to the USB MSD.
-        Mounting and unmounting is managed by this method, so the inner block
-        does not have to handle that.
-        """
-
-        mount_point = os.path.join(self.scratch_dir, 'vexpress-usb')
-        if not os.path.exists(mount_point):
-            os.makedirs(mount_point)
-
-        self._enter_mcc()
-        self._mount_usbmsd(mount_point)
-        try:
-            yield mount_point
-        finally:
-            self._umount_usbmsd(mount_point)
-            self._leave_mcc()
-
-    def _enter_mcc(self):
-        match_id = self.proc.expect([
-            self.config.vexpress_stop_autoboot_prompt,
-            pexpect.EOF, pexpect.TIMEOUT])
-        if match_id != 0:
-            msg = 'Unable to intercept MCC boot prompt'
-            logging.error(msg)
-            raise CriticalError(msg)
-        self.proc.sendline("")
-        self.proc.expect(['Cmd>'])
-
-    def _mount_usbmsd(self, mount_point):
-        self.proc.sendline("USB_ON")
-        self.proc.expect(['Cmd>'])
-
-        # wait a few seconds so that the kernel on the host detects the USB
-        # mass storage interface exposed by the Vexpress
-        sleep(5)
-
-        usb_device = self.config.vexpress_usb_mass_storage_device
-
-        self.context.run_command('mount %s %s' % (usb_device, mount_point))
-
-    def _umount_usbmsd(self, mount_point):
-        self.context.run_command('umount %s' % mount_point)
-
-    def _leave_mcc(self):
-        self.proc.sendline("reboot")
-
-    def _extract_uefi_from_tarball(self, tarball, uefi_on_image):
-        tmpdir = self.scratch_dir
-
-        # Android boot tarballs have the UEFI binary at boot/*.bin, while
-        # Ubuntu ones have it at ./*.bin
-        #
-        # --no-anchored matches the name inside any directory in the tarball.
-        self.context.run_command('tar --no-anchored -xaf %s -C %s %s' % (tarball, tmpdir,
-                                                                         uefi_on_image))
-
-        uefi_on_image = os.path.join(tmpdir, uefi_on_image)
-        test_uefi = os.path.join(tmpdir, 'uefi.bin')
-        self.context.run_command('mv %s %s' % (uefi_on_image, test_uefi))
-
-        self.test_uefi = test_uefi
-
-    def _restore_uefi_backup(self, mount_point):
-        uefi_path = self.config.vexpress_uefi_path
-        uefi = os.path.join(mount_point, uefi_path)
-        uefi_backup_path = self.config.vexpress_uefi_backup_path
-        uefi_backup = os.path.join(mount_point, uefi_backup_path)
-
-        if os.path.exists(uefi_backup):
-            # restore the uefi backup
-            self.context.run_command('cp %s %s' % (uefi_backup, uefi))
-        else:
-            # no existing backup yet means that this is the first time ever;
-            # the uefi in there is the good one, and we backup it up.
-            self.context.run_command('cp %s %s' % (uefi, uefi_backup))
-
-    def _install_test_uefi(self, mount_point):
-        uefi_path = self.config.vexpress_uefi_path
-        uefi = os.path.join(mount_point, uefi_path)
-        # FIXME what if self.test_uefi is not set, or points to an unexisting
-        # file?
-        self.context.run_command('cp %s %s' % (self.test_uefi, uefi))
-
-
-target_class = VexpressTarget

=== removed file 'lava_dispatcher/downloader.py'
--- lava_dispatcher/downloader.py	2013-07-18 14:10:57 +0000
+++ lava_dispatcher/downloader.py	1970-01-01 00:00:00 +0000
@@ -1,193 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import atexit
-import bz2
-import contextlib
-import logging
-import os
-import re
-import subprocess
-import time
-import traceback
-import urllib2
-import urlparse
-import zlib
-
-from tempfile import mkdtemp
-from lava_dispatcher.config import get_config_file
-from lava_dispatcher.utils import rmtree
-
-
-@contextlib.contextmanager
-def _scp_stream(url, proxy=None, cookies=None):
-    process = None
-    try:
-        process = subprocess.Popen(
-            ['nice', 'ssh', url.netloc, 'cat', url.path],
-            shell=False,
-            stdout=subprocess.PIPE
-        )
-        yield process.stdout
-    finally:
-        if process:
-            process.kill()
-
-
-@contextlib.contextmanager
-def _http_stream(url, proxy=None, cookies=None):
-    resp = None
-    handlers = []
-    if proxy:
-        handlers = [urllib2.ProxyHandler({'http': '%s' % proxy})]
-    opener = urllib2.build_opener(*handlers)
-
-    if cookies:
-        opener.addheaders.append(('Cookie', cookies))
-
-    try:
-        url = urllib2.quote(url.geturl(), safe=":/")
-        resp = opener.open(url, timeout=30)
-        yield resp
-    finally:
-        if resp:
-            resp.close()
-
-
-@contextlib.contextmanager
-def _file_stream(url, proxy=None, cookies=None):
-    fd = None
-    try:
-        fd = open(url.path, 'rb')
-        yield fd
-    finally:
-        if fd:
-            fd.close()
-
-
-@contextlib.contextmanager
-def _decompressor_stream(url, imgdir, decompress):
-    fd = None
-    decompressor = None
-
-    fname, suffix = _url_to_fname_suffix(url, imgdir)
-
-    if suffix == 'gz' and decompress:
-        decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
-    elif suffix == 'bz2' and decompress:
-        decompressor = bz2.BZ2Decompressor()
-    else:
-        # don't remove the file's real suffix
-        fname = '%s.%s' % (fname, suffix)
-
-    def write(buff):
-        if decompressor:
-            buff = decompressor.decompress(buff)
-        fd.write(buff)
-
-    try:
-        fd = open(fname, 'wb')
-        yield (write, fname)
-    finally:
-        if fd:
-            fd.close
-
-
-def _url_to_fname_suffix(url, path='/tmp'):
-    filename = os.path.basename(url.path)
-    parts = filename.split('.')
-    suffix = parts[-1]
-    filename = os.path.join(path, '.'.join(parts[:-1]))
-    return filename, suffix
-
-
-def _url_mapping(url, context):
-    """allows the downloader to override a URL so that something like:
-     http://blah/ becomes file://localhost/blah
-    """
-    mappings = get_config_file('urlmappings.txt')
-    if mappings:
-        newurl = url
-        with open(mappings, 'r') as f:
-            for line in f.readlines():
-                pat, rep = line.split(',')
-                pat = pat.strip()
-                rep = rep.strip()
-                newurl = re.sub(pat, rep, newurl)
-        if newurl != url:
-            url = newurl
-            logging.info('url mapped to: %s', url)
-    return url
-
-
-def download_image(url, context, imgdir=None,
-                   delete_on_exit=True, decompress=True):
-    """downloads a image that's been compressed as .bz2 or .gz and
-    optionally decompresses it on the file to the cache directory
-    """
-    logging.info("Downloading image: %s" % url)
-    if not imgdir:
-        imgdir = mkdtemp(dir=context.config.lava_image_tmpdir)
-        if delete_on_exit:
-            atexit.register(rmtree, imgdir)
-
-    url = _url_mapping(url, context)
-
-    url = urlparse.urlparse(url)
-    if url.scheme == 'scp':
-        reader = _scp_stream
-    elif url.scheme == 'http' or url.scheme == 'https':
-        reader = _http_stream
-    elif url.scheme == 'file':
-        reader = _file_stream
-    else:
-        raise Exception("Unsupported url protocol scheme: %s" % url.scheme)
-
-    cookies = context.config.lava_cookies
-    with reader(url, context.config.lava_proxy, cookies) as r:
-        with _decompressor_stream(url, imgdir, decompress) as (writer, fname):
-            bsize = 32768
-            buff = r.read(bsize)
-            while buff:
-                writer(buff)
-                buff = r.read(bsize)
-    return fname
-
-
-def download_with_retry(context, imgdir, url, decompress=True, timeout=300):
-    """
-    download test result with a retry mechanism and 5 minute default timeout
-    """
-    logging.info("About to download %s to the host" % url)
-    now = time.time()
-    tries = 0
-
-    while True:
-        try:
-            return download_image(url, context, imgdir, decompress)
-        except:
-            logging.warn("unable to download: %r" % traceback.format_exc())
-            tries += 1
-            if time.time() >= now + timeout:
-                raise RuntimeError(
-                    'downloading %s failed after %d tries' % (url, tries))
-            else:
-                logging.info('Sleep one minute and retry (%d)' % tries)
-                time.sleep(60)

=== removed file 'lava_dispatcher/errors.py'
--- lava_dispatcher/errors.py	2013-03-27 03:50:25 +0000
+++ lava_dispatcher/errors.py	1970-01-01 00:00:00 +0000
@@ -1,63 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Michael Hudson-Doyle <michael.hudson@linaro.org>
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-
-class DispatcherError(Exception):
-    """
-    Base exception and error class for dispatcher
-    """
-
-
-class TimeoutError(DispatcherError):
-    """
-    The timeout error
-    """
-
-
-class CriticalError(DispatcherError):
-    """
-    The critical error
-    """
-
-
-class GeneralError(DispatcherError):
-    """
-    The non-critical error
-    """
-
-
-class NetworkError(CriticalError):
-    """
-    This is used when a network error occurs, such as failing to bring up
-    the network interface on the client
-    """
-
-
-class ADBConnectError(NetworkError):
-    """
-    This is used when adb connection failed to created
-    """
-
-
-class OperationFailed(GeneralError):
-    """
-    The exception throws when a file system or system operation fails.
-    """

=== removed file 'lava_dispatcher/ipmi.py'
--- lava_dispatcher/ipmi.py	2013-07-09 14:48:10 +0000
+++ lava_dispatcher/ipmi.py	1970-01-01 00:00:00 +0000
@@ -1,96 +0,0 @@ 
-# Copyright (C) 2013 Linaro Limited
-#
-# Authors:
-#   Antonio Terceiro <antonio.terceiro@linaro.org>
-#   Michael Hudson-Doyle <michael.hudson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.    See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-
-class IPMITool(object):
-    """
-    This class wraps the ipmitool CLI to provide a convenient object-oriented
-    API that can be composed into the implementation of devices that can be
-    managed with IPMI.
-    """
-
-    def __init__(self, context, host, ipmitool="ipmitool"):
-        self.host = host
-        self.context = context
-        self.ipmitool = ipmitool
-
-    def __ipmi(self, command):
-        self.context.run_command(
-            "%s -H %s -U admin -P admin %s" % (
-                self.ipmitool, self.host, command
-            ),
-            failok=False
-        )
-
-    def __ipmi_cmd_output(self, command):
-        return self.context.run_command_get_output(
-            "%s -H %s -U admin -P admin %s" % (
-                self.ipmitool, self.host, command)
-        )
-
-    def set_to_boot_from_disk(self):
-        self.__ipmi("chassis bootdev disk")
-
-    def set_to_boot_from_pxe(self):
-        self.__ipmi("chassis bootdev pxe")
-
-    def power_off(self):
-        self.__ipmi("chassis power off")
-
-    def power_on(self):
-        self.__ipmi("chassis power on")
-
-    def reset(self):
-        self.__ipmi("chassis power reset")
-
-    def get_power_status(self):
-        """ Command 'ipmitool power status' will output 'Chassis Power is on'
-            or 'Chassis Power is off'.
-            Before we return the last string, the '\n' needs to be strip."""
-        return self.__ipmi_cmd_output("power status").split(' ')[-1].rstrip()
-
-class IpmiPxeBoot(object):
-    """
-    This class provides a convenient object-oriented API that can be
-    used to initiate power on/off and boot device selection for pxe
-    and disk boot devices using ipmi commands.
-    """
-
-    def __init__(self, context, host):
-        self.ipmitool = IPMITool(context, host)
-
-    def power_on_boot_master(self):
-        self.ipmitool.set_to_boot_from_pxe()
-        if self.ipmitool.get_power_status() == 'on':
-            self.ipmitool.power_off()
-        self.ipmitool.power_on()
-
-    def power_on_boot_image(self):
-        self.ipmitool.set_to_boot_from_disk()
-        if self.ipmitool.get_power_status() == 'on':
-            self.ipmitool.power_off()
-        self.ipmitool.power_on()
-
-    def power_off(self):
-        if self.ipmitool.get_power_status() == 'on':
-            self.ipmitool.power_off()
-

=== removed file 'lava_dispatcher/job.py'
--- lava_dispatcher/job.py	2013-09-06 10:53:29 +0000
+++ lava_dispatcher/job.py	1970-01-01 00:00:00 +0000
@@ -1,432 +0,0 @@ 
-# Copyright (C) 2011-2012 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import json
-import logging
-import pexpect
-import time
-import traceback
-import hashlib
-import simplejson
-from json_schema_validator.schema import Schema
-from json_schema_validator.validator import Validator
-
-from lava_dispatcher.actions import get_all_cmds
-from lava_dispatcher.context import LavaContext
-from lava_dispatcher.errors import (
-    CriticalError,
-    TimeoutError,
-    GeneralError,
-    ADBConnectError,
-)
-
-
-job_schema = {
-    'type': 'object',
-    'additionalProperties': {},
-    'properties': {
-        'actions': {
-            'items': {
-                'type': 'object',
-                'properties': {
-                    'command': {
-                        'optional': False,
-                        'type': 'string',
-                    },
-                    'parameters': {
-                        'optional': True,
-                        'type': 'object',
-                    },
-                    'metadata': {
-                        'optional': True,
-                    },
-                },
-                'additionalProperties': False,
-            },
-        },
-        'device_type': {
-            'type': 'string',
-            'optional': True,
-        },
-        'device_group': {
-            'type': 'array',
-            'additionalProperties': False,
-            'optional': True,
-            'items': {
-                'type': 'object',
-                'properties': {
-                    'role': {
-                        'optional': False,
-                        'type': 'string',
-                    },
-                    'count': {
-                        'optional': False,
-                        'type': 'integer',
-                    },
-                    'device_type': {
-                        'optional': False,
-                        'type': 'string',
-                    },
-                    'tags': {
-                        'type': 'array',
-                        'uniqueItems': True,
-                        'items': {'type': 'string'},
-                        'optional': True,
-                    },
-                },
-            },
-        },
-        'job_name': {
-            'type': 'string',
-            'optional': True,
-        },
-        'health_check': {
-            'optional': True,
-            'default': False,
-        },
-        'target': {
-            'type': 'string',
-            'optional': True,
-        },
-        'target_group': {
-            'type': 'string',
-            'optional': True,
-        },
-        'port': {
-            'type': 'integer',
-            'optional': True,
-        },
-        'hostname': {
-            'type': 'string',
-            'optional': True,
-        },
-        'role': {
-            'type': 'string',
-            'optional': True,
-        },
-        'group_size': {
-            'type': 'integer',
-            'optional': True,
-        },
-        'timeout': {
-            'type': 'integer',
-            'optional': False,
-        },
-        'logging_level': {
-            'type': 'string',
-            'enum': ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
-            'optional': True,
-        },
-        'tags': {
-            'type': 'array',
-            'uniqueItems': True,
-            'items': {'type': 'string'},
-            'optional': True,
-        },
-        'priority': {
-            'type': 'string',
-            'optional': True,
-        },
-    },
-}
-
-
-def validate_job_data(job_data):
-    schema = Schema(job_schema)
-    Validator.validate(schema, job_data)
-    lava_commands = get_all_cmds()
-    for action in job_data['actions']:
-        command_name = action['command']
-        command = lava_commands.get(command_name)
-        if command is None:
-            raise ValueError("action %r not known" % command_name)
-        command.validate_parameters(action.get('parameters'))
-
-
-class LavaTestJob(object):
-    def __init__(self, job_json, oob_file, config, output_dir):
-        self.job_status = 'pass'
-        self.load_job_data(job_json)
-        self.context = LavaContext(
-            self.target, config, oob_file, self.job_data, output_dir)
-
-    def load_job_data(self, job_json):
-        self.job_data = json.loads(job_json)
-
-    @property
-    def target(self):
-        return self.job_data['target']
-
-    @property
-    def tags(self):
-        return self.job_data.get('tags', [])
-
-    @property
-    def logging_level(self):
-        try:
-            return self.job_data['logging_level']
-        except:
-            return None
-
-    def run(self, transport=None, group_data=None):
-        self.context.assign_transport(transport)
-        self.context.assign_group_data(group_data)
-        validate_job_data(self.job_data)
-        self._set_logging_level()
-        lava_commands = get_all_cmds()
-
-        if self.job_data['actions'][-1]['command'].startswith(
-                "submit_results"):
-            submit_results = self.job_data['actions'].pop(-1)
-        else:
-            submit_results = None
-
-        metadata = {
-            'target.hostname': self.target,
-        }
-
-        if 'device_type' in self.job_data:
-            metadata['target.device_type'] = self.job_data['device_type']
-        self.context.test_data.add_metadata(metadata)
-
-        self.context.test_data.add_tags(self.tags)
-
-        if 'target' in self.job_data:
-            metadata['target'] = self.job_data['target']
-            self.context.test_data.add_metadata(metadata)
-
-        if 'logging_level' in self.job_data:
-            metadata['logging_level'] = self.job_data['logging_level']
-            self.context.test_data.add_metadata(metadata)
-
-        if 'target_group' in self.job_data:
-            metadata['target_group'] = self.job_data['target_group']
-            self.context.test_data.add_metadata(metadata)
-
-            if 'role' in self.job_data:
-                metadata['role'] = self.job_data['role']
-                self.context.test_data.add_metadata(metadata)
-
-            if 'group_size' in self.job_data:
-                metadata['group_size'] = self.job_data['group_size']
-                self.context.test_data.add_metadata(metadata)
-
-            logging.debug("[ACTION-B] Multi Node test!")
-            logging.debug("[ACTION-B] target_group is (%s)." % self.context.test_data.metadata['target_group'])
-        else:
-            logging.debug("[ACTION-B] Single node test!")
-
-        try:
-            job_length = len(self.job_data['actions'])
-            job_num = 0
-            for cmd in self.job_data['actions']:
-                job_num += 1
-                params = cmd.get('parameters', {})
-                if cmd.get('command').startswith('lava_android_test'):
-                    if not params.get('timeout') and \
-                       self.job_data.get('timeout'):
-                        params['timeout'] = self.job_data['timeout']
-                logging.info("[ACTION-B] %s is started with %s" %
-                             (cmd['command'], params))
-                metadata = cmd.get('metadata', {})
-                self.context.test_data.add_metadata(metadata)
-                action = lava_commands[cmd['command']](self.context)
-                err = None
-                try:
-                    status = 'fail'
-                    action.run(**params)
-                except ADBConnectError as err:
-                    logging.info("ADBConnectError")
-                    if cmd.get('command') == 'boot_linaro_android_image':
-                        logging.warning(('[ACTION-E] %s failed to create the'
-                                         ' adb connection') % (cmd['command']))
-                        ## clear the session on the serial and wait a while
-                        ## and not put the following 3 sentences into the
-                        ## boot_linaro_android_image method just for
-                        ## avoiding effects when the method being called
-                        ## in other places
-                        logging.warning(
-                            'Now will reboot the image to try again')
-                        self.context.client.proc.sendcontrol("c")
-                        self.context.client.proc.sendline("")
-                        time.sleep(5)
-                        self.context.client.boot_linaro_android_image(
-                            adb_check=True)
-                        ## mark it as pass if the second boot works
-                        status = 'pass'
-                except TimeoutError as err:
-                    logging.info("TimeoutError")
-                    if cmd.get('command').startswith('lava_android_test'):
-                        logging.warning("[ACTION-E] %s times out." %
-                                        (cmd['command']))
-                        if job_num == job_length:
-                            ## not reboot the android image for
-                            ## the last test action
-                            pass
-                        else:
-                            ## clear the session on the serial and wait a while
-                            ## and not put the following 3 sentences into the
-                            ## boot_linaro_android_image method just for
-                            ## avoiding effects when the method being called
-                            ## in other places
-                            logging.warning(
-                                "Now the android image will be rebooted")
-                            self.context.client.proc.sendcontrol("c")
-                            self.context.client.proc.sendline("")
-                            time.sleep(5)
-                            self.context.client.boot_linaro_android_image()
-                    else:
-                        logging.warn("Unhandled timeout condition")
-                        continue
-                except CriticalError as err:
-                    logging.info("CriticalError")
-                    raise
-                except (pexpect.TIMEOUT, GeneralError) as err:
-                    logging.warn("pexpect timed out, pass with status %s" % status)
-                    pass
-                except Exception as err:
-                    logging.info("General Exception: %s" % unicode(str(err)))
-                    raise
-                else:
-                    logging.debug("setting status pass")
-                    status = 'pass'
-                finally:
-                    logging.debug("finally status %s" % status)
-                    err_msg = ""
-                    if status == 'fail':
-                        # XXX mwhudson, 2013-01-17: I have no idea what this
-                        # code is doing.
-                        logging.warning(
-                            "[ACTION-E] %s is finished with error (%s)." %
-                            (cmd['command'], err))
-                        err_msg = ("Lava failed at action %s with error:"
-                                   "%s\n") % (cmd['command'],
-                                              unicode(str(err),
-                                                      'ascii', 'replace'))
-                        if cmd['command'] == 'lava_test_run':
-                            err_msg += "Lava failed on test: %s" % \
-                                       params.get('test_name', "Unknown")
-                        err_msg = err_msg + traceback.format_exc()
-                        self.context.log("ErrorMessage: %s" % unicode(str(err)))
-                        self.context.log(err_msg)
-                    else:
-                        logging.info(
-                            "[ACTION-E] %s is finished successfully." %
-                            (cmd['command']))
-                        err_msg = ""
-                    self.context.test_data.add_result(
-                        action.test_name(**params), status, err_msg)
-        except:
-            #Capture all user-defined and non-user-defined critical errors
-            self.context.test_data.job_status = 'fail'
-            raise
-        finally:
-            device_version = self.context.get_device_version() or 'error'
-            self.context.test_data.add_metadata({
-                'target.device_version': device_version
-            })
-            if 'target_group' in self.job_data:
-                # all nodes call aggregate, even if there is no submit_results command
-                self._aggregate_bundle(transport, lava_commands, submit_results)
-            elif submit_results:
-                params = submit_results.get('parameters', {})
-                action = lava_commands[submit_results['command']](
-                    self.context)
-                params_for_display = params.copy()
-                if 'token' in params_for_display:
-                    params_for_display['token'] = '<HIDDEN>'
-                try:
-                    logging.info("Submitting the test result with parameters = %s", params_for_display)
-                    action.run(**params)
-                except Exception as err:
-                    logging.error("Failed to submit the test result. Error = %s", err)
-                    raise
-            self.context.finish()
-
-    def _aggregate_bundle(self, transport, lava_commands, submit_results):
-        if "sub_id" not in self.job_data:
-            raise ValueError("Invalid MultiNode JSON - missing sub_id")
-        # all nodes call aggregate, even if there is no submit_results command
-        base_msg = {
-            "request": "aggregate",
-            "bundle": None,
-            "sub_id": self.job_data['sub_id']
-        }
-        if not submit_results:
-            transport(json.dumps(base_msg))
-            return
-        # need to collate this bundle before submission, then send to the coordinator.
-        params = submit_results.get('parameters', {})
-        action = lava_commands[submit_results['command']](self.context)
-        token = None
-        group_name = self.job_data['target_group']
-        if 'token' in params:
-            token = params['token']
-        # the transport layer knows the client_name for this bundle.
-        bundle = action.collect_bundles(**params)
-        # catch parse errors in bundles
-        try:
-            bundle_str = simplejson.dumps(bundle)
-        except Exception as e:
-            logging.error("Unable to parse bundle '%s' - %s" % (bundle, e))
-            transport(json.dumps(base_msg))
-            return
-        sha1 = hashlib.sha1()
-        sha1.update(bundle_str)
-        base_msg['bundle'] = sha1.hexdigest()
-        reply = transport(json.dumps(base_msg))
-        # if this is sub_id zero, this will wait until the last call to aggregate
-        # and then the reply is the full list of bundle checksums.
-        if reply == "ack":
-            # coordinator has our checksum for this bundle, submit as pending to launch_control
-            action.submit_pending(bundle, params['server'], params['stream'], token, group_name)
-            logging.info("Result bundle %s has been submitted to Dashboard as pending." % base_msg['bundle'])
-            return
-        elif reply == "nack":
-            logging.error("Unable to submit result bundle checksum to coordinator")
-            return
-        else:
-            if self.job_data["sub_id"].endswith(".0"):
-                # submit this bundle, add it to the pending list which is indexed by group_name and post the set
-                logging.info("Submitting bundle '%s' and aggregating with pending group results." % base_msg['bundle'])
-                action.submit_group_list(bundle, params['server'], params['stream'], token, group_name)
-                return
-            else:
-                raise ValueError("API error - collated bundle has been sent to the wrong node.")
-
-    def _set_logging_level(self):
-        # set logging level is optional
-        level = self.logging_level
-        # CRITICAL, ERROR, WARNING, INFO or DEBUG
-        if level:
-            if level == 'DEBUG':
-                logging.root.setLevel(logging.DEBUG)
-            elif level == 'INFO':
-                logging.root.setLevel(logging.INFO)
-            elif level == 'WARNING':
-                logging.root.setLevel(logging.WARNING)
-            elif level == 'ERROR':
-                logging.root.setLevel(logging.ERROR)
-            elif level == 'CRITICAL':
-                logging.root.setLevel(logging.CRITICAL)
-            else:
-                logging.warning("Unknown logging level in the job '%s'. "
-                                "Allow level are : CRITICAL, ERROR, "
-                                "WARNING, INFO or DEBUG" % level)

=== removed file 'lava_dispatcher/lava_test_shell.py'
--- lava_dispatcher/lava_test_shell.py	2013-07-18 14:01:21 +0000
+++ lava_dispatcher/lava_test_shell.py	1970-01-01 00:00:00 +0000
@@ -1,380 +0,0 @@ 
-# Copyright (C) 2011-2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-"""
-Import test results from disk.
-
-This module contains functions to create a bundle from the disk files created
-by a lava-test-shell run.
-"""
-
-import base64
-import datetime
-import decimal
-import mimetypes
-import yaml
-import logging
-import os
-import re
-
-from lava_dispatcher.test_data import create_attachment
-
-
-def _get_cpus(cpuinfo):
-    devices = []
-    cpu_type = '?'
-    cpu_cores = 0
-    cpu_attrs = {}
-    board_type = '?'
-    board_rev = '?'
-    for line in cpuinfo.split('\n'):
-        if len(line.strip()) == 0:
-            continue
-        (key, val) = line.split(':', 1)
-        key = key.strip()
-        val = val.strip()
-
-        if key == 'Processor':
-            cpu_type = val
-        elif key == 'processor':
-            cpu_cores += 1
-        elif key == 'Hardware':
-            board_type = val
-        elif key == 'Revision':
-            board_rev = val
-        else:
-            cpu_attrs[key] = val
-
-    cpu_attrs['cpu_type'] = cpu_type
-
-    for i in xrange(cpu_cores):
-        x = {
-            'device_type': 'device.cpu',
-            'description': 'Processor #%d' % i,
-            'attributes': cpu_attrs
-        }
-        devices.append(x)
-
-    devices.append({
-        'device_type': 'device.board',
-        'description': board_type,
-        'attributes': {'revision': board_rev}
-    })
-
-    return devices
-
-
-def _get_mem(meminfo):
-    for line in meminfo.split('\n'):
-        if line.startswith('MemTotal'):
-            (k, v) = line.split(':', 1)
-            return {
-                'device_type': 'device.mem',
-                'description': '%s of RAM' % v.strip(),
-            }
-
-    return None
-
-
-def _get_hw_context(cpuinfo, meminfo):
-    devices = []
-    if cpuinfo:
-        devices.extend(_get_cpus(cpuinfo))
-    if meminfo:
-        devices.append(_get_mem(meminfo))
-    return {'devices': devices}
-
-
-def _get_sw_context(build, pkgs, sw_sources):
-    ctx = {'image': {'name': build}}
-
-    pkglist = []
-    pattern = re.compile(
-        ("^\s*package:\s*(?P<package_name>[^:]+?)\s*:"
-         "\s*(?P<version>[^\s].+)\s*$"), re.M)
-    for line in pkgs.split('\n'):
-        match = pattern.search(line)
-        if match:
-            name, version = match.groups()
-            pkglist.append({'name': name.strip(), 'version': version})
-
-    ctx['packages'] = pkglist
-    ctx['sources'] = sw_sources
-    return ctx
-
-
-def _attachments_from_dir(from_dir):
-    attachments = []
-    for filename, filepath in _directory_names_and_paths(from_dir, ignore_missing=True):
-        if filename.endswith('.mimetype'):
-            continue
-        mime_type = _read_content(filepath + '.mimetype', ignore_missing=True).strip()
-        if not mime_type:
-            mime_type = mimetypes.guess_type(filepath)[0]
-            if mime_type is None:
-                mime_type = 'application/octet-stream'
-        attachments.append(
-            create_attachment(filename, _read_content(filepath), mime_type))
-    return attachments
-
-
-def _attributes_from_dir(from_dir):
-    attributes = {}
-    for filename, filepath in _directory_names_and_paths(from_dir, ignore_missing=True):
-        if os.path.isfile(filepath):
-            attributes[filename] = _read_content(filepath)
-    return attributes
-
-
-def _result_to_dir(test_result, res_dir):
-
-    def w(name, content):
-        with open(os.path.join(res_dir, name), 'w') as f:
-            f.write(str(content) + '\n')
-
-    for name in 'result', 'measurement', 'units', 'message', 'timestamp', 'duration':
-        if name in test_result:
-            w(name, test_result[name])
-
-    os.makedirs(os.path.join(res_dir, 'attachments'))
-
-    for attachment in test_result.get('attachments', []):
-        path = 'attachments/' + attachment['pathname']
-        w(path, base64.b64decode(attachment['content']))
-        w(path + '.mimetype', attachment['mime_type'])
-
-    os.makedirs(os.path.join(res_dir, 'attributes'))
-
-    for attrname, attrvalue in test_result.get('attributes', []).items():
-        path = 'attributes/' + attrname
-        w(path, attrvalue)
-
-
-def _result_from_dir(res_dir, test_case_id=None):
-    if not test_case_id:
-        test_case_id = os.path.basename(res_dir)
-    result = {
-        'test_case_id': test_case_id
-    }
-
-    for fname in 'result', 'measurement', 'units', 'message', 'timestamp', 'duration':
-        fpath = os.path.join(res_dir, fname)
-        if os.path.isfile(fpath):
-            result[fname] = _read_content(fpath).strip()
-
-    if 'measurement' in result:
-        try:
-            result['measurement'] = decimal.Decimal(result['measurement'])
-        except decimal.InvalidOperation:
-            logging.warning("Invalid measurement for %s: %s" % (res_dir, result['measurement']))
-            del result['measurement']
-
-    result['attachments'] = _attachments_from_dir(os.path.join(res_dir, 'attachments'))
-    result['attributes'] = _attributes_from_dir(os.path.join(res_dir, 'attributes'))
-
-    return result
-
-
-def _merge_results(dest, src):
-    tc_id = dest['test_case_id']
-    assert tc_id == src['test_case_id']
-    for attrname in 'result', 'measurement', 'units', 'message', 'timestamp', 'duration':
-        if attrname in dest:
-            if attrname in src:
-                if dest[attrname] != src[attrname]:
-                    logging.warning(
-                        'differing values for %s in result for %s: %s and %s',
-                        attrname, tc_id, dest[attrname], src[attrname])
-        else:
-            if attrname in src:
-                dest[attrname] = src
-    dest.setdefault('attachments', []).extend(src.get('attachments', []))
-    dest.setdefault('attributes', {}).update(src.get('attributes', []))
-
-
-def _get_test_results(test_run_dir, testdef, stdout):
-    results_from_log_file = []
-    fixupdict = {}
-    pattern = None
-
-    if 'parse' in testdef:
-        if 'fixupdict' in testdef['parse']:
-            fixupdict = testdef['parse']['fixupdict']
-        if 'pattern' in testdef['parse']:
-            pattern = re.compile(testdef['parse']['pattern'])
-    else:
-        defpat = "(?P<test_case_id>.*-*)\\s+:\\s+(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))"
-        pattern = re.compile(defpat)
-        fixupdict = {'PASS': 'pass', 'FAIL': 'fail', 'SKIP': 'skip',
-                     'UNKNOWN': 'unknown'}
-        logging.warning("""Using a default pattern to parse the test result. This may lead to empty test result in certain cases.""")
-
-    if not pattern:
-        logging.debug("No pattern set")
-    for lineno, line in enumerate(stdout.split('\n'), 1):
-        match = pattern.match(line.strip())
-        if match:
-            res = match.groupdict()
-            if 'result' in res:
-                if res['result'] in fixupdict:
-                    res['result'] = fixupdict[res['result']]
-                if res['result'] not in ('pass', 'fail', 'skip', 'unknown'):
-                    logging.error('bad test result line: %s' % line.strip())
-                    continue
-            res['log_lineno'] = lineno
-            res['log_filename'] = 'stdout.log'
-            if 'measurement' in res:
-                try:
-                    res['measurement'] = decimal.Decimal(res['measurement'])
-                except decimal.InvalidOperation:
-                    logging.warning("Invalid measurement %s" % (
-                        res['measurement']))
-                    del res['measurement']
-            results_from_log_file.append(res)
-
-    results_from_directories = []
-    results_from_directories_by_id = {}
-
-    result_names_and_paths = _directory_names_and_paths(
-        os.path.join(test_run_dir, 'results'), ignore_missing=True)
-    result_names_and_paths = [
-        (name, path) for (name, path) in result_names_and_paths
-        if os.path.isdir(path)]
-    result_names_and_paths.sort(key=lambda (name, path): os.path.getmtime(path))
-
-    for name, path in result_names_and_paths:
-        r = _result_from_dir(path)
-        results_from_directories_by_id[name] = (r, len(results_from_directories))
-        results_from_directories.append(r)
-
-    for res in results_from_log_file:
-        if res.get('test_case_id') in results_from_directories_by_id:
-            dir_res, index = results_from_directories_by_id[res['test_case_id']]
-            results_from_directories[index] = None
-            _merge_results(res, dir_res)
-
-    for res in results_from_directories:
-        if res is not None:
-            results_from_log_file.append(res)
-
-    return results_from_log_file
-
-
-def _get_run_attachments(test_run_dir, testdef, stdout):
-    attachments = [create_attachment('stdout.log', stdout),
-                   create_attachment('testdef.yaml', testdef)]
-    return_code = _read_content(os.path.join(test_run_dir, 'return_code'), ignore_missing=True)
-    if return_code:
-        attachments.append(create_attachment('return_code', return_code))
-
-    attachments.extend(
-        _attachments_from_dir(os.path.join(test_run_dir, 'attachments')))
-
-    return attachments
-
-
-def _get_run_testdef_metadata(test_run_dir):
-    testdef_metadata = {
-        'version': None,
-        'description': None,
-        'format': None,
-        'location': None,
-        'url': None,
-        'os': None,
-        'devices': None,
-        'environment': None
-    }
-
-    metadata = _read_content(os.path.join(test_run_dir, 'testdef_metadata'))
-    if metadata is not '':
-        testdef_metadata = yaml.safe_load(metadata)
-
-    return testdef_metadata
-
-
-def _get_test_run(test_run_dir, hwcontext, build, pkginfo, testdefs_by_uuid):
-    now = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
-
-    testdef = _read_content(os.path.join(test_run_dir, 'testdef.yaml'))
-    stdout = _read_content(os.path.join(test_run_dir, 'stdout.log'))
-    uuid = _read_content(os.path.join(test_run_dir, 'analyzer_assigned_uuid'))
-    attachments = _get_run_attachments(test_run_dir, testdef, stdout)
-    attributes = _attributes_from_dir(os.path.join(test_run_dir, 'attributes'))
-
-    testdef = yaml.safe_load(testdef)
-
-    if uuid in testdefs_by_uuid:
-        sw_sources = testdefs_by_uuid[uuid]._sw_sources
-    else:
-        logging.warning("no software sources found for run with uuid %s" % uuid)
-        sw_sources = []
-    swcontext = _get_sw_context(build, pkginfo, sw_sources)
-
-    return {
-        'test_id': testdef.get('metadata').get('name'),
-        'analyzer_assigned_date': now,
-        'analyzer_assigned_uuid': uuid,
-        'time_check_performed': False,
-        'test_results': _get_test_results(test_run_dir, testdef, stdout),
-        'software_context': swcontext,
-        'hardware_context': hwcontext,
-        'attachments': attachments,
-        'attributes': attributes,
-        'testdef_metadata': _get_run_testdef_metadata(test_run_dir)
-    }
-
-
-def _read_content(filepath, ignore_missing=False):
-    if not os.path.exists(filepath) and ignore_missing:
-        return ''
-    with open(filepath, 'r') as f:
-        return f.read()
-
-
-def _directory_names_and_paths(dirpath, ignore_missing=False):
-    if not os.path.exists(dirpath) and ignore_missing:
-        return []
-    return [(filename, os.path.join(dirpath, filename))
-            for filename in os.listdir(dirpath)]
-
-
-def get_bundle(results_dir, testdefs_by_uuid):
-    """
-    iterates through a results directory to build up a bundle formatted for
-    the LAVA dashboard
-    """
-    testruns = []
-    cpuinfo = _read_content(os.path.join(results_dir, 'hwcontext/cpuinfo.txt'), ignore_missing=True)
-    meminfo = _read_content(os.path.join(results_dir, 'hwcontext/meminfo.txt'), ignore_missing=True)
-    hwctx = _get_hw_context(cpuinfo, meminfo)
-
-    build = _read_content(os.path.join(results_dir, 'swcontext/build.txt'))
-    pkginfo = _read_content(os.path.join(results_dir, 'swcontext/pkgs.txt'), ignore_missing=True)
-
-    for test_run_name, test_run_path in _directory_names_and_paths(results_dir):
-        if test_run_name in ('hwcontext', 'swcontext'):
-            continue
-        if os.path.isdir(test_run_path):
-            try:
-                testruns.append(_get_test_run(test_run_path, hwctx, build, pkginfo, testdefs_by_uuid))
-            except:
-                logging.exception('error processing results for: %s' % test_run_name)
-
-    return {'test_runs': testruns, 'format': 'Dashboard Bundle Format 1.6'}

=== removed directory 'lava_dispatcher/signals'
=== removed file 'lava_dispatcher/signals/__init__.py'
--- lava_dispatcher/signals/__init__.py	2013-08-23 14:39:05 +0000
+++ lava_dispatcher/signals/__init__.py	1970-01-01 00:00:00 +0000
@@ -1,261 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import contextlib
-import logging
-import tempfile
-import json
-
-from lava_dispatcher.utils import rmtree
-
-from lava_dispatcher.lava_test_shell import (
-    _result_to_dir,
-    _result_from_dir,
-)
-
-
-class BaseSignalHandler(object):
-
-    def __init__(self, testdef_obj):
-        self.testdef_obj = testdef_obj
-
-    def start(self):
-        pass
-
-    def end(self):
-        pass
-
-    def starttc(self, test_case_id):
-        pass
-
-    def endtc(self, test_case_id):
-        pass
-
-    def custom_signal(self, signame, params):
-        pass
-
-    def postprocess_test_run(self, test_run):
-        pass
-
-
-class SignalHandler(BaseSignalHandler):
-
-    def __init__(self, testdef_obj):
-        BaseSignalHandler.__init__(self, testdef_obj)
-        self._case_data = {}
-        self._cur_case_id = None
-        self._cur_case_data = None
-
-    def starttc(self, test_case_id):
-        if self._cur_case_data:
-            logging.warning(
-                "unexpected cur_case_data %s", self._cur_case_data)
-        self._cur_case_id = test_case_id
-        data = None
-        try:
-            data = self.start_testcase(test_case_id)
-        except:
-            logging.exception("start_testcase failed for %s", test_case_id)
-        self._cur_case_data = self._case_data[test_case_id] = data
-
-    def endtc(self, test_case_id):
-        if self._cur_case_id != test_case_id:
-            logging.warning(
-                "stoptc for %s received but expecting %s",
-                test_case_id, self._cur_case_id)
-        else:
-            try:
-                self.end_testcase(test_case_id, self._cur_case_data)
-            except:
-                logging.exception(
-                    "stop_testcase failed for %s", test_case_id)
-        self._cur_case_data = None
-
-    def postprocess_test_run(self, test_run):
-        for test_result in test_run['test_results']:
-            tc_id = test_result.get('test_case_id')
-            if not tc_id:
-                continue
-            if tc_id not in self._case_data:
-                continue
-            data = self._case_data[tc_id]
-            try:
-                self.postprocess_test_result(test_result, data)
-            except:
-                logging.exception("postprocess_test_result failed for %s", tc_id)
-
-    @contextlib.contextmanager
-    def _result_as_dir(self, test_result):
-        scratch_dir = self.testdef_obj.context.client.target_device.scratch_dir
-        rdir = tempfile.mkdtemp(dir=scratch_dir)
-        try:
-            tcid = test_result['test_case_id']
-            _result_to_dir(test_result, rdir)
-            yield rdir
-            test_result.clear()
-            test_result.update(_result_from_dir(rdir, tcid))
-        finally:
-            rmtree(rdir)
-
-    def start_testcase(self, test_case_id):
-        return {}
-
-    def end_testcase(self, test_case_id, data):
-        pass
-
-    def postprocess_test_result(self, test_result, case_data):
-        pass
-
-
-class FailedCall(Exception):
-    """
-    Just need a plain Exception to trigger the failure of the
-    signal handler and set keep_running to False.
-    """
-
-    def __init__(self, call):
-        Exception.__init__(self, "%s call failed" % call)
-
-
-class SignalDirector(object):
-
-    def __init__(self, client, testdefs_by_uuid, context):
-        self.client = client
-        self.testdefs_by_uuid = testdefs_by_uuid
-        self._test_run_data = []
-        self._cur_handler = None
-        self.context = context
-        self.connection = None
-
-    def signal(self, name, params):
-        handler = getattr(self, '_on_' + name, None)
-        if not handler and self._cur_handler:
-            handler = self._cur_handler.custom_signal
-            params = [name] + list(params)
-        if handler:
-            try:
-                handler(*params)
-            except:
-                logging.exception("handling signal %s failed", name)
-                return False
-            return True
-
-    def set_connection(self, connection):
-        self.connection = connection
-
-    def _on_STARTRUN(self, test_run_id, uuid):
-        self._cur_handler = None
-        testdef_obj = self.testdefs_by_uuid.get(uuid)
-        if testdef_obj:
-            self._cur_handler = testdef_obj.handler
-        if self._cur_handler:
-            self._cur_handler.start()
-
-    def _on_ENDRUN(self, test_run_id, uuid):
-        if self._cur_handler:
-            self._cur_handler.end()
-
-    def _on_STARTTC(self, test_case_id):
-        if self._cur_handler:
-            self._cur_handler.starttc(test_case_id)
-
-    def _on_ENDTC(self, test_case_id):
-        if self._cur_handler:
-            self._cur_handler.endtc(test_case_id)
-
-    def _on_SEND(self, *args):
-        arg_length = len(args)
-        if arg_length == 1:
-            msg = {"request": "lava_send", "messageID": args[0], "message": None}
-        else:
-            message_id = args[0]
-            remainder = args[1:arg_length]
-            logging.debug("%d key value pair(s) to be sent." % int(len(remainder)))
-            data = {}
-            for message in remainder:
-                detail = str.split(message, "=")
-                if len(detail) == 2:
-                    data[detail[0]] = detail[1]
-            msg = {"request": "lava_send", "messageID": message_id, "message": data}
-        logging.debug("Handling signal <LAVA_SEND %s>" % msg)
-        reply = self.context.transport(json.dumps(msg))
-        if reply == "nack":
-            raise FailedCall("LAVA_SEND nack")
-
-    def _on_SYNC(self, message_id):
-        if not self.connection:
-            logging.error("No connection available for on_SYNC")
-            return
-        logging.debug("Handling signal <LAVA_SYNC %s>" % message_id)
-        msg = {"request": "lava_sync", "messageID": message_id, "message": None}
-        reply = self.context.transport(json.dumps(msg))
-        message_str = ""
-        if reply == "nack":
-            message_str = " nack"
-        else:
-            message_str = ""
-        ret = self.connection.sendline("<LAVA_SYNC_COMPLETE%s>" % message_str)
-        logging.debug("runner._connection.sendline wrote %d bytes" % ret)
-
-    def _on_WAIT(self, message_id):
-        if not self.connection:
-            logging.error("No connection available for on_WAIT")
-            return
-        logging.debug("Handling signal <LAVA_WAIT %s>" % message_id)
-        msg = {"request": "lava_wait", "messageID": message_id, "message": None}
-        reply = self.context.transport(json.dumps(msg))
-        message_str = ""
-        if reply == "nack":
-            message_str = " nack"
-        else:
-            for target, messages in reply.items():
-                for key, value in messages.items():
-                    message_str += " %s:%s=%s" % (target, key, value)
-        self.connection.sendline("<LAVA_WAIT_COMPLETE%s>" % message_str)
-
-    def _on_WAIT_ALL(self, message_id, role=None):
-        if not self.connection:
-            logging.error("No connection available for on_WAIT_ALL")
-            return
-        logging.debug("Handling signal <LAVA_WAIT_ALL %s>" % message_id)
-        msg = {"request": "lava_wait_all", "messageID": message_id, "role": role}
-        reply = self.context.transport(json.dumps(msg))
-        message_str = ""
-        if reply == "nack":
-            message_str = " nack"
-        else:
-            #the reply format is like this :
-            #"{target:{key1:value, key2:value2, key3:value3},
-            #  target2:{key1:value, key2:value2, key3:value3}}"
-            for target, messages in reply.items():
-                for key, value in messages.items():
-                    message_str += " %s:%s=%s" % (target, key, value)
-        self.connection.sendline("<LAVA_WAIT_ALL_COMPLETE%s>" % message_str)
-
-    def postprocess_bundle(self, bundle):
-        for test_run in bundle['test_runs']:
-            uuid = test_run['analyzer_assigned_uuid']
-            testdef_obj = self.testdefs_by_uuid.get(uuid)
-            if testdef_obj.handler:
-                try:
-                    testdef_obj.handler.postprocess_test_run(test_run)
-                except:
-                    logging.exception(
-                        "postprocessing test run with uuid %s failed", uuid)

=== removed file 'lava_dispatcher/signals/armprobe.py'
--- lava_dispatcher/signals/armprobe.py	2012-12-18 23:58:02 +0000
+++ lava_dispatcher/signals/armprobe.py	1970-01-01 00:00:00 +0000
@@ -1,77 +0,0 @@ 
-import logging
-import os
-import subprocess
-import urlparse
-
-from lava_dispatcher.downloader import download_image
-from lava_dispatcher.signals import SignalHandler
-
-
-class ArmProbe(SignalHandler):
-
-    def __init__(self, testdef_obj, post_process_script, probe_args=None):
-        SignalHandler.__init__(self, testdef_obj)
-
-        self.scratch_dir = testdef_obj.context.client.target_device.scratch_dir
-
-        # post_process_script can be local to the repo or a URL
-        if not urlparse.urlparse(post_process_script).scheme:
-            self.post_process_script = os.path.join(
-                testdef_obj.repo, post_process_script)
-        else:
-            self.post_process_script = download_image(
-                post_process_script, testdef_obj.context, self.scratch_dir)
-        os.chmod(self.post_process_script, 755)  # make sure we can execute it
-
-        # build up the command we'll use for running the probe
-        config = testdef_obj.context.client.config
-        self.aep_channels = config.arm_probe_channels
-        self.aep_args = [
-            config.arm_probe_binary, '-C', config.arm_probe_config]
-        for c in self.aep_channels:
-            self.aep_args.append('-c')
-            self.aep_args.append(c)
-
-        for arg in probe_args:
-            self.aep_args.append(arg)
-
-    def start_testcase(self, test_case_id):
-        ofile = os.path.join(self.scratch_dir, '%s.out' % test_case_id)
-        efile = os.path.join(self.scratch_dir, '%s.err' % test_case_id)
-        ofile = open(ofile, 'w')
-        efile = open(efile, 'w')
-
-        proc = subprocess.Popen(
-            self.aep_args, stdout=ofile, stderr=efile, stdin=subprocess.PIPE)
-        # The arm-probe-binary allows you to write to stdin via a pipe and
-        # includes the content as comments in the header of its output
-        proc.stdin.write(
-            '# run from lava-test-shell with args: %r' % self.aep_args)
-        proc.stdin.close()
-
-        return {
-            'process': proc,
-            'logfile': ofile,
-            'errfile': efile,
-        }
-
-    def end_testcase(self, test_case_id, data):
-        proc = data['process']
-        proc.terminate()
-
-    def postprocess_test_result(self, test_result, data):
-        tcid = test_result['test_case_id']
-        logging.info('analyzing aep data for %s ...', tcid)
-
-        lfile = data['logfile']
-        efile = data['errfile']
-
-        lfile.close()
-        efile.close()
-
-        with self._result_as_dir(test_result) as result_dir:
-            args = [self.post_process_script, tcid, lfile.name, efile.name]
-            args.extend(self.aep_channels)
-
-            if subprocess.call(args, cwd=result_dir) != 0:
-                logging.warn('error calling post_process_script')

=== removed file 'lava_dispatcher/signals/duration.py'
--- lava_dispatcher/signals/duration.py	2013-07-16 16:07:13 +0000
+++ lava_dispatcher/signals/duration.py	1970-01-01 00:00:00 +0000
@@ -1,20 +0,0 @@ 
-import datetime
-import time
-
-from json_schema_validator.extensions import timedelta_extension
-from lava_dispatcher.signals import SignalHandler
-
-
-class AddDuration(SignalHandler):
-
-    def start_testcase(self, test_case_id):
-        return {
-            'starttime': time.time()
-        }
-
-    def end_testcase(self, test_case_id, data):
-        data['endtime'] = time.time()
-
-    def postprocess_test_result(self, test_result, data):
-        delta = datetime.timedelta(seconds=data['endtime'] - data['starttime'])
-        test_result['duration'] = timedelta_extension.to_json(delta)

=== removed file 'lava_dispatcher/signals/shellhooks.py'
--- lava_dispatcher/signals/shellhooks.py	2013-07-16 16:07:05 +0000
+++ lava_dispatcher/signals/shellhooks.py	1970-01-01 00:00:00 +0000
@@ -1,83 +0,0 @@ 
-from ConfigParser import NoOptionError
-import logging
-import shutil
-import subprocess
-import os
-import tempfile
-
-from lava_dispatcher.lava_test_shell import _read_content
-from lava_dispatcher.signals import SignalHandler
-from lava_dispatcher.test_data import create_attachment
-from lava_dispatcher.utils import mkdtemp
-
-
-class ShellHooks(SignalHandler):
-
-    def __init__(self, testdef_obj, handlers=None, device_config_vars=None):
-        if not device_config_vars:
-            device_config_vars = {}
-        if not handlers:
-            handlers = {}
-        SignalHandler.__init__(self, testdef_obj)
-        self.result_dir = mkdtemp()
-        self.handlers = handlers
-        self.scratch_dir = mkdtemp()
-        self.code_dir = os.path.join(self.scratch_dir, 'code')
-        shutil.copytree(testdef_obj.repo, self.code_dir)
-        device_config = testdef_obj.context.client.target_device.config
-        self.our_env = os.environ.copy()
-        for env_var, config_var in device_config_vars.iteritems():
-            try:
-                config_value = device_config.cp.get('__main__', config_var)
-            except NoOptionError:
-                logging.warning(
-                    "No value found for device config %s; leaving %s unset "
-                    "in environment", config_var, env_var)
-            else:
-                self.our_env[env_var] = config_value
-
-    def _invoke_hook(self, name, working_dir, args=None):
-        if not args:
-            args = []
-        script_name = self.handlers.get(name)
-        if not script_name:
-            return
-        script = os.path.join(self.code_dir, script_name)
-        if not os.path.exists(script):
-            logging.warning("handler script %s not found", script_name)
-            return
-        (fd, path) = tempfile.mkstemp(dir=self.code_dir)
-        status = subprocess.call(
-            [script] + args, cwd=working_dir, env=self.our_env,
-            stdout=fd, stderr=subprocess.STDOUT)
-        if status != 0:
-            logging.warning(
-                "%s handler script exited with code %s", name, status)
-        return path
-
-    def start_testcase(self, test_case_id):
-        case_dir = os.path.join(self.result_dir, test_case_id)
-        os.mkdir(case_dir)
-        case_data = {'case_dir': case_dir}
-        case_data['start_testcase_output'] = self._invoke_hook(
-            'start_testcase', case_dir)
-        return case_data
-
-    def end_testcase(self, test_case_id, case_data):
-        case_data['end_testcase_output'] = self._invoke_hook(
-            'end_testcase', case_data['case_dir'])
-
-    def postprocess_test_result(self, test_result, case_data):
-        with self._result_as_dir(test_result) as result_dir:
-            case_data['postprocess_test_result_output'] = self._invoke_hook(
-                'postprocess_test_result', case_data['case_dir'], [result_dir])
-
-        for key in 'start_testcase_output', 'end_testcase_output', \
-                'postprocess_test_result_output':
-            path = case_data.get(key)
-            if path is None:
-                continue
-            content = _read_content(path, ignore_missing=True)
-            if content:
-                test_result['attachments'].append(
-                    create_attachment(key + '.txt', _read_content(path)))

=== removed file 'lava_dispatcher/tarballcache.py'
--- lava_dispatcher/tarballcache.py	2013-07-16 16:08:54 +0000
+++ lava_dispatcher/tarballcache.py	1970-01-01 00:00:00 +0000
@@ -1,114 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Andy Doan <andy.doan@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import contextlib
-import errno
-import fcntl
-import logging
-import os
-
-import lava_dispatcher.utils as utils
-
-from lava_dispatcher.downloader import (
-    download_image,
-)
-
-
-def get_tarballs(context, image_url, scratch_dir, generator):
-    """
-    Tries to return a cached copy array of (boot_tgz, root_tgz). If no cache
-    exists for this image_url, then it:
-     * places a global lock for the image_url to prevent other dispatchers
-       from concurrently building tarballs for the same image
-     * downloads the image
-     * calls the generator function to build the tarballs
-
-    generator - a callback to a function that can generate the tarballs given
-    a local copy .img file
-    """
-    logging.info('try to find cached tarballs for %s' % image_url)
-    with _cache_locked(image_url, context.config.lava_cachedir) as cachedir:
-        boot_tgz = os.path.join(cachedir, 'boot.tgz')
-        root_tgz = os.path.join(cachedir, 'root.tgz')
-        data_file = os.path.join(cachedir, 'data')
-
-        if os.path.exists(boot_tgz) and os.path.exists(root_tgz):
-            data = _get_data(cachedir, data_file)
-            if data is not None:
-                logging.info('returning cached copies')
-                (boot_tgz, root_tgz) = _link(boot_tgz, root_tgz, scratch_dir)
-                return boot_tgz, root_tgz, data
-        else:
-            logging.info('no cache found for %s' % image_url)
-
-        _clear_cache(boot_tgz, root_tgz, data_file)
-        image = download_image(image_url, context, cachedir)
-        (boot_tgz, root_tgz, data) = generator(image)
-        with open(data_file, 'w') as f:
-            f.write(data)
-        _link(boot_tgz, root_tgz, cachedir)
-        os.unlink(image)
-        return boot_tgz, root_tgz, data
-
-
-def _link(boot_tgz, root_tgz, destdir):
-    dboot_tgz = os.path.join(destdir, 'boot.tgz')
-    droot_tgz = os.path.join(destdir, 'root.tgz')
-    os.link(boot_tgz, dboot_tgz)
-    os.link(root_tgz, droot_tgz)
-    return dboot_tgz, droot_tgz
-
-
-def _clear_cache(boot_tgz, root_tgz, data):
-    logging.info('Clearing cache contents')
-    if os.path.exists(boot_tgz):
-        os.unlink(boot_tgz)
-    if os.path.exists(root_tgz):
-        os.unlink(root_tgz)
-    if os.path.exists(data):
-        os.unlink(data)
-
-
-def _get_data(cachedir, data_file):
-    try:
-        with open(data_file, 'r') as f:
-            return f.read()
-    except IOError:
-        logging.warn('No data found for cached tarballs in %s' % cachedir)
-    return None
-
-
-@contextlib.contextmanager
-def _cache_locked(image_url, cachedir):
-    cachedir = utils.url_to_cache(image_url, cachedir).replace('.', '-')
-    try:
-        os.makedirs(cachedir)
-    except OSError as e:
-        if e.errno != errno.EEXIST:  # directory may already exist and is okay
-            raise
-
-    lockfile = os.path.join(cachedir, 'lockfile')
-    with open(lockfile, 'w') as f:
-        logging.info('aquiring lock for %s' % lockfile)
-        try:
-            fcntl.lockf(f, fcntl.LOCK_EX)
-            yield cachedir
-        finally:
-            fcntl.lockf(f, fcntl.LOCK_UN)

=== removed file 'lava_dispatcher/test_data.py'
--- lava_dispatcher/test_data.py	2013-07-16 16:08:33 +0000
+++ lava_dispatcher/test_data.py	1970-01-01 00:00:00 +0000
@@ -1,78 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-from datetime import datetime
-from uuid import uuid1
-import base64
-
-
-def create_attachment(pathname, content, mime_type='text/plain'):
-    return {
-        'pathname': pathname,
-        'mime_type': mime_type,
-        'content': base64.b64encode(content),
-    }
-
-
-class LavaTestData(object):
-    def __init__(self, test_id='lava'):
-        self.job_status = 'pass'
-        self.metadata = {}
-        self._test_run = {'test_results': [], 'attachments': [], 'tags': []}
-        self._test_run['test_id'] = test_id
-        self._assign_date()
-        self._assign_uuid()
-
-    def _assign_date(self):
-        TIMEFORMAT = '%Y-%m-%dT%H:%M:%SZ'
-        self._test_run['time_check_performed'] = False
-        self._test_run['analyzer_assigned_date'] = datetime.strftime(
-            datetime.now(), TIMEFORMAT)
-
-    def _assign_uuid(self):
-        self._test_run['analyzer_assigned_uuid'] = str(uuid1())
-
-    def add_result(self, test_case_id, result, message=""):
-        result_data = {
-            'test_case_id': test_case_id,
-            'result': result,
-            'message': message
-        }
-        self._test_run['test_results'].append(result_data)
-
-    def add_attachments(self, attachments):
-        self._test_run['attachments'].extend(attachments)
-
-    def add_tag(self, tag):
-        self._test_run['tags'].append(tag)
-
-    def add_tags(self, tags):
-        for tag in tags:
-            self.add_tag(tag)
-
-    def add_metadata(self, metadata):
-        self.metadata.update(metadata)
-
-    def get_metadata(self):
-        return self.metadata
-
-    def get_test_run(self):
-        self.add_result('job_complete', self.job_status)
-        return self._test_run

=== removed directory 'lava_dispatcher/tests'
=== removed file 'lava_dispatcher/tests/__init__.py'
--- lava_dispatcher/tests/__init__.py	2013-07-16 16:09:31 +0000
+++ lava_dispatcher/tests/__init__.py	1970-01-01 00:00:00 +0000
@@ -1,10 +0,0 @@ 
-import unittest
-
-
-def test_suite():
-    module_names = [
-        'lava_dispatcher.tests.test_config',
-        'lava_dispatcher.tests.test_device_version',
-    ]
-    loader = unittest.TestLoader()
-    return loader.loadTestsFromNames(module_names)

=== removed file 'lava_dispatcher/tests/helper.py'
--- lava_dispatcher/tests/helper.py	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/tests/helper.py	1970-01-01 00:00:00 +0000
@@ -1,63 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Antonio Terceiro <antonio.terceiro@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses>.
-
-import os
-from lava_dispatcher.config import get_device_config
-import lava_dispatcher.config
-
-tmp_dir = os.getenv("TMPDIR") or '/tmp'
-tmp_config_dir = os.path.join(tmp_dir, 'lava-dispatcher-config')
-
-
-def create_config(name, data):
-    filename = os.path.join(tmp_config_dir, name)
-    if not os.path.exists(os.path.dirname(filename)):
-        os.mkdir(os.path.dirname(filename))
-    with open(filename, 'w') as f:
-        for key in data.keys():
-            f.write("%s = %s\n" % (key, data[key]))
-
-
-def create_device_config(name, data):
-    create_config("devices/%s.conf" % name, data)
-    lava_dispatcher.config.custom_config_path = tmp_config_dir
-    config = get_device_config(name)
-    lava_dispatcher.config.custom_config_path = None
-    return config
-
-
-def setup_config_dir():
-    os.mkdir(tmp_config_dir)
-
-
-def cleanup_config_dir():
-    os.system('rm -rf %s' % tmp_config_dir)
-
-from unittest import TestCase
-
-
-class LavaDispatcherTestCase(TestCase):
-
-    def setUp(self):
-        setup_config_dir()
-        lava_dispatcher.config.custom_config_path = tmp_config_dir
-
-    def tearDown(self):
-        lava_dispatcher.config.custom_config_path = None
-        cleanup_config_dir()

=== removed directory 'lava_dispatcher/tests/test-config'
=== removed directory 'lava_dispatcher/tests/test-config/bin'
=== removed file 'lava_dispatcher/tests/test-config/bin/fake-qemu'
--- lava_dispatcher/tests/test-config/bin/fake-qemu	2013-08-12 09:34:03 +0000
+++ lava_dispatcher/tests/test-config/bin/fake-qemu	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-#!/bin/sh
-
-echo 'QEMU emulator version 1.5.0 (Debian 1.5.0+dfsg-4), Copyright (c) 2003-2008 Fabrice Bellard'

=== removed file 'lava_dispatcher/tests/test_config.py'
--- lava_dispatcher/tests/test_config.py	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/tests/test_config.py	1970-01-01 00:00:00 +0000
@@ -1,35 +0,0 @@ 
-# Copyright (C) 2011 Linaro Limited
-#
-# Author: Linaro Validation Team <linaro-dev@lists.linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses>.
-
-from unittest import TestCase
-
-import lava_dispatcher.config
-from lava_dispatcher.config import get_config, get_device_config
-from lava_dispatcher.utils import string_to_list
-
-from lava_dispatcher.tests.helper import *
-
-class TestConfigData(LavaDispatcherTestCase):
-
-    def test_server_ip(self):
-        create_config('lava-dispatcher.conf', { 'LAVA_SERVER_IP': '99.99.99.99' })
-        server_config = get_config()
-        expected = "99.99.99.99"
-        lava_server_ip = server_config.lava_server_ip
-        self.assertEqual(expected, lava_server_ip)

=== removed file 'lava_dispatcher/tests/test_device_version.py'
--- lava_dispatcher/tests/test_device_version.py	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/tests/test_device_version.py	1970-01-01 00:00:00 +0000
@@ -1,62 +0,0 @@ 
-# Copyright (C) 2012 Linaro Limited
-#
-# Author: Antonio Terceiro <antonio.terceiro@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses>.
-
-import re
-from lava_dispatcher.tests.helper import LavaDispatcherTestCase, create_device_config, create_config
-import os
-
-from lava_dispatcher.device.target import Target
-from lava_dispatcher.device.qemu import QEMUTarget
-from lava_dispatcher.device.fastmodel import FastModelTarget
-from lava_dispatcher.context import LavaContext
-from lava_dispatcher.config import get_config
-
-
-def _create_fastmodel_target():
-    config = create_device_config('fastmodel01', {'device_type': 'fastmodel',
-                                                  'simulator_binary': '/path/to/fastmodel',
-                                                  'license_server': 'foo.local'})
-    target = FastModelTarget(None, config)
-    return target
-
-
-def _create_qemu_target(extra_device_config={}):
-    create_config('lava-dispatcher.conf', {})
-
-    device_config_data = {'device_type': 'qemu'}
-    device_config_data.update(extra_device_config)
-    device_config = create_device_config('qemu01', device_config_data)
-
-    dispatcher_config = get_config()
-
-    context = LavaContext('qemu01', dispatcher_config, None, None, None)
-    return QEMUTarget(context, device_config)
-
-
-class TestDeviceVersion(LavaDispatcherTestCase):
-
-    def test_base(self):
-        target = Target(None, None)
-        self.assertIsInstance(target.get_device_version(), str)
-
-    def test_qemu(self):
-        fake_qemu = os.path.join(os.path.dirname(__file__), 'test-config', 'bin', 'fake-qemu')
-        target = _create_qemu_target({'qemu_binary': fake_qemu})
-        device_version = target.get_device_version()
-        assert(re.search('^[0-9.]+', device_version))

=== removed file 'lava_dispatcher/utils.py'
--- lava_dispatcher/utils.py	2013-08-28 14:55:50 +0000
+++ lava_dispatcher/utils.py	1970-01-01 00:00:00 +0000
@@ -1,278 +0,0 @@ 
-# Copyright (C) 2011-2012 Linaro Limited
-#
-# Author: Paul Larson <paul.larson@linaro.org>
-#
-# This file is part of LAVA Dispatcher.
-#
-# LAVA Dispatcher is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# LAVA Dispatcher is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along
-# with this program; if not, see <http://www.gnu.org/licenses>.
-
-import atexit
-import datetime
-import errno
-import logging
-import os
-import shutil
-import tempfile
-import threading
-import time
-import urlparse
-import subprocess
-
-from shlex import shlex
-
-import pexpect
-
-from lava_dispatcher.errors import CriticalError
-
-
-def link_or_copy_file(src, dest):
-    try:
-        dirname = os.path.dirname(dest)
-        if not os.path.exists(dirname):
-            os.makedirs(dirname)
-        os.link(src, dest)
-    except OSError, err:
-        if err.errno == errno.EXDEV:
-            shutil.copy(src, dest)
-        if err.errno == errno.EEXIST:
-            logging.debug("Cached copy of %s already exists" % dest)
-        else:
-            logging.exception("os.link '%s' with '%s' failed" % (src, dest))
-
-
-def copy_file(src, dest):
-    dirname = os.path.dirname(dest)
-    if not os.path.exists(dir):
-        os.makedirs(dirname)
-    shutil.copy(src, dest)
-
-
-def rmtree(directory):
-    subprocess.call(['rm', '-rf', directory])
-
-
-def mkdtemp(basedir='/tmp'):
-    """ returns a temporary directory that's deleted when the process exits
-    """
-
-    d = tempfile.mkdtemp(dir=basedir)
-    atexit.register(rmtree, d)
-    os.chmod(d, 0755)
-    return d
-
-
-def mk_targz(tfname, rootdir, basedir='.', asroot=False):
-    """ Similar shutil.make_archive but it doesn't blow up with unicode errors
-    """
-    cmd = 'tar -C %s -czf %s %s' % (rootdir, tfname, basedir)
-    if asroot:
-        cmd = 'nice sudo %s' % cmd
-    if logging_system(cmd):
-        raise CriticalError('Unable to make tarball of: %s' % rootdir)
-
-
-def _list_files(dirname):
-    files = []
-    for f in os.listdir(dirname):
-        f = os.path.join(dirname, f)
-        if os.path.isdir(f):
-            files.extend(_list_files(f))
-        elif os.path.isfile(f):
-            files.append(f)
-    return files
-
-
-def extract_targz(tfname, tmpdir):
-    """ Extracts the contents of a .tgz file to the tmpdir. It then returns
-    a list of all the files (full path). This is being used to get around
-    issues that python's tarfile seems to have with unicode
-    """
-    if logging_system('nice tar -C %s -xzf %s' % (tmpdir, tfname)):
-        raise CriticalError('Unable to extract tarball: %s' % tfname)
-
-    return _list_files(tmpdir)
-
-
-def ensure_directory(path):
-    """ ensures the path exists, if it doesn't it will be created
-    """
-    if not os.path.exists(path):
-        os.mkdir(path)
-
-
-def ensure_directory_empty(path):
-    """ Ensures the given directorty path exists, and is empty. It will delete
-    The directory contents if needed.
-    """
-    if os.path.exists(path):
-        rmtree(path)
-    os.mkdir(path)
-
-
-def url_to_cache(url, cachedir):
-    url_parts = urlparse.urlsplit(url)
-    path = os.path.join(cachedir, url_parts.netloc,
-                        url_parts.path.lstrip(os.sep))
-    return path
-
-
-def string_to_list(string):
-    splitter = shlex(string, posix=True)
-    splitter.whitespace = ","
-    splitter.whitespace_split = True
-    newlines_to_spaces = lambda x: x.replace('\n', ' ')
-    strip_newlines = lambda x: newlines_to_spaces(x).strip(' ')
-    return map(strip_newlines, list(splitter))
-
-
-def logging_system(cmd):
-    logging.debug("Executing on host : '%r'" % cmd)
-    return os.system(cmd)
-
-
-class DrainConsoleOutput(threading.Thread):
-
-    def __init__(self, proc=None, timeout=None):
-        threading.Thread.__init__(self)
-        self.proc = proc
-        self.timeout = timeout
-        self._stopevent = threading.Event()
-        self.daemon = True  # allow thread to die when main main proc exits
-
-    def run(self):
-        expect_end = None
-        if self.timeout and (self.timeout > -1):
-            expect_end = time.time() + self.timeout
-        while not self._stopevent.isSet():
-            if expect_end and (expect_end <= time.time()):
-                logging.info("DrainConsoleOutput times out:%s" % self.timeout)
-                break
-            self.proc.empty_buffer()
-            time.sleep(5)
-
-    def join(self, timeout=None):
-        self._stopevent.set()
-        threading.Thread.join(self, timeout)
-
-
-class logging_spawn(pexpect.spawn):
-
-    def __init__(self, command, timeout=30):
-        pexpect.spawn.__init__(
-            self, command, timeout=timeout)
-
-        # serial can be slow, races do funny things, so increase delay
-        self.delaybeforesend = 0.05
-
-    def sendline(self, s=''):
-        logging.debug("sendline : %s", s)
-        return super(logging_spawn, self).sendline(s)
-
-    def send(self, string):
-        logging.debug("send : %s", string)
-        sent = 0
-        for char in string:
-            sent += super(logging_spawn, self).send(char)
-        return sent
-
-    def expect(self, *args, **kw):
-        # some expect should not be logged because it is so much noise.
-        if 'lava_no_logging' in kw:
-            del kw['lava_no_logging']
-            return self.expect(*args, **kw)
-
-        if 'timeout' in kw:
-            timeout = kw['timeout']
-        else:
-            timeout = self.timeout
-
-        if len(args) == 1:
-            logging.debug("expect (%d): '%s'", timeout, args[0])
-        else:
-            logging.debug("expect (%d): '%s'", timeout, str(args))
-
-        return super(logging_spawn, self).expect(*args, **kw)
-
-    def empty_buffer(self):
-        """Make sure there is nothing in the pexpect buffer."""
-        index = 0
-        while index == 0:
-            index = self.expect(
-                ['.+', pexpect.EOF, pexpect.TIMEOUT],
-                timeout=1, lava_no_logging=1)
-
-
-def connect_to_serial(context):
-    """
-    Attempts to connect to a serial console server like conmux or cyclades
-    """
-    retry_count = 0
-    retry_limit = 3
-
-    port_stuck_message = 'Data Buffering Suspended\.'
-    conn_closed_message = 'Connection closed by foreign host\.'
-
-    expectations = {
-        port_stuck_message: 'reset-port',
-        'Connected\.\r': 'all-good',
-        conn_closed_message: 'retry',
-        pexpect.TIMEOUT: 'all-good',
-    }
-    patterns = []
-    results = []
-    for pattern, result in expectations.items():
-        patterns.append(pattern)
-        results.append(result)
-
-    while retry_count < retry_limit:
-        proc = context.spawn(
-            context.device_config.connection_command,
-            timeout=1200)
-        logging.info('Attempting to connect to device')
-        match = proc.expect(patterns, timeout=10)
-        result = results[match]
-        logging.info('Matched %r which means %s', patterns[match], result)
-        if result == 'retry':
-            proc.close(True)
-            retry_count += 1
-            time.sleep(5)
-            continue
-        elif result == 'all-good':
-            atexit.register(proc.close, True)
-            return proc
-        elif result == 'reset-port':
-            reset_cmd = context.device_config.reset_port_command
-            if reset_cmd:
-                context.run_command(reset_cmd)
-            else:
-                raise CriticalError('no reset_port command configured')
-            proc.close(True)
-            retry_count += 1
-            time.sleep(5)
-    raise CriticalError('could execute connection_command successfully')
-
-
-# XXX Duplication: we should reuse lava-test TestArtifacts
-def generate_bundle_file_name(test_name):
-    return ("{test_id}.{time.tm_year:04}-{time.tm_mon:02}-{time.tm_mday:02}T"
-            "{time.tm_hour:02}:{time.tm_min:02}:{time.tm_sec:02}Z").format(
-                test_id=test_name,
-                time=datetime.datetime.utcnow().timetuple())
-
-def finalize_process(proc):
-    if proc:
-        logging.debug("Finalizing child proccess with PID %d" % proc.pid)
-        proc.kill(9)
-        proc.close()

=== removed directory 'lava_test_shell'
=== removed file 'lava_test_shell/README'
--- lava_test_shell/README	2013-04-11 19:10:11 +0000
+++ lava_test_shell/README	1970-01-01 00:00:00 +0000
@@ -1,11 +0,0 @@ 
-This directory contains support scripts for lava-test-shell.
-
-The scripts in this directory will be copied into the target device and will be
-in $PATH during the lava-test-shell execution.
-
-Distribution-specific scripts can be placed in distro/$distroname, and will
-override the ones in the top level directory. For example,
-distro/android/lava-test-runner will be used on Android instead of the
-lava-test-runner script present at the same directory as this README file.
-
-All scripts have to be named using a "lava-" suffix.

=== removed directory 'lava_test_shell/distro'
=== removed directory 'lava_test_shell/distro/android'
=== removed file 'lava_test_shell/distro/android/lava-test-runner'
--- lava_test_shell/distro/android/lava-test-runner	2013-04-11 18:45:52 +0000
+++ lava_test_shell/distro/android/lava-test-runner	1970-01-01 00:00:00 +0000
@@ -1,107 +0,0 @@ 
-#!/system/bin/mksh
-
-LCK=${LCK-"/lava-test-runner.lck"}
-
-#make sure we are only run once
-if [ ! -f ${LCK} ] ; then
-	( flock -n 9 || exit 1 ; true ) 9>${LCK}
-else
-	exit 0
-fi
-
-PREFIX="<LAVA_TEST_RUNNER>:"
-WORKFILE="/data/lava/lava-test-runner.conf"
-RESULTSDIR="/data/lava/results"
-BINDIR="/data/lava/bin"
-
-hwcontext()
-{
-	mkdir -p ${RESULTSDIR}/hwcontext
-	cpuinfo=${RESULTSDIR}/hwcontext/cpuinfo.txt
-	meminfo=${RESULTSDIR}/hwcontext/meminfo.txt
-
-	[ -f ${cpuinfo} ] || cat /proc/cpuinfo > ${cpuinfo}
-	[ -f ${meminfo} ] || cat /proc/meminfo > ${meminfo}
-}
-
-swcontext()
-{
-	mkdir -p ${RESULTSDIR}/swcontext
-	build=${RESULTSDIR}/swcontext/build.txt
-	pkgs=${RESULTSDIR}/swcontext/pkgs.txt
-
-	[ -f ${build} ] || getprop ro.build.display.id > ${build}
-	[ -f ${pkgs} ] || pm list packages > ${pkgs}
-}
-
-cleanup()
-{
-	# just adds a little handy debugging
-	ls ${RESULTSDIR}
-	echo "${PREFIX} calling sync"
-	sync
-	echo "${PREFIX} exiting"
-}
-
-{
-	trap cleanup INT TERM EXIT
-
-	export PATH=${BINDIR}:${PATH}
-	echo "${PREFIX} started"
-	mkdir -p ${RESULTSDIR}
-
-	echo "${PREFIX} disabling suspend and waiting for home screen ..."
-	disablesuspend.sh
-
-	# move the workfile to something timestamped and run that. This
-	# prevents us from running the same thing again after a reboot
-	TS=`date +%s`
-	mv ${WORKFILE} ${WORKFILE}-${TS}
-	WORKFILE=${WORKFILE}-${TS}
-
-	echo "${PREFIX} looking for installation work in ${WORKFILE}"
-	for line in $(cat ${WORKFILE}); do
-		# we don't have "basename" on android, but this does the
-		# equivalent under mksh
-		testdir=${line%/} # trim off trailing slash iff it exists
-		test=${testdir/*\//}
-		if [ -f ${line}/install.sh ] ; then
-			echo "${PREFIX} running ${test} installer ..."
-			/system/bin/sh ${line}/install.sh
-			if [ $? -ne 0 ] ; then
-				echo "${PREFIX} ${test} installer failed, exiting"
-				hwcontext
-				swcontext
-				exit 1
-			fi
-		fi
-	done
-
-	echo "${PREFIX} save hardware/software context info..."
-	hwcontext
-	swcontext
-
-	echo "${PREFIX} looking for work in ${WORKFILE}"
-	for line in $(cat ${WORKFILE}); do
-		# we don't have "basename" on android, but this does the
-		# equivalent under mksh
-		testdir=${line%/} # trim off trailing slash iff it exists
-		test=${testdir/*\//}
-		echo "${PREFIX} running ${test} under lava-test-shell..."
-		odir=${RESULTSDIR}/${test}-`date +%s`
-		mkdir ${odir}
-		mkdir ${odir}/attachments/
-		cp ${line}/testdef.yaml ${odir}/
-		cp ${line}/testdef_metadata ${odir}/
-		cp ${line}/uuid ${odir}/analyzer_assigned_uuid
-		cp ${line}/run.sh ${odir}/attachments/
-		echo 'text/plain' > ${odir}/attachments/run.sh.mimetype
-		if [ -f ${line}/install.sh ]; then
-			cp ${line}/install.sh ${odir}/attachments/
-			echo 'text/plain' > ${odir}/attachments/install.sh.mimetype
-		fi
-		lava-test-shell --output_dir ${odir} /system/bin/sh -e "${line}/run.sh"
-		echo "${PREFIX} ${test} exited with: `cat ${odir}/return_code`"
-	done
-}
-

=== removed directory 'lava_test_shell/distro/fedora'
=== removed file 'lava_test_shell/distro/fedora/lava-install-packages'
--- lava_test_shell/distro/fedora/lava-install-packages	2013-06-06 14:47:36 +0000
+++ lava_test_shell/distro/fedora/lava-install-packages	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-#!/bin/sh
-
-yum -e 0 -y -q install "$@"

=== removed file 'lava_test_shell/distro/fedora/lava-installed-packages'
--- lava_test_shell/distro/fedora/lava-installed-packages	2013-04-16 02:59:05 +0000
+++ lava_test_shell/distro/fedora/lava-installed-packages	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-#!/bin/sh
-
-rpm -qa --qf "package: %{NAME} : %{VERSION}-%{RELEASE} \n"

=== removed file 'lava_test_shell/distro/fedora/lava-os-build'
--- lava_test_shell/distro/fedora/lava-os-build	2013-04-16 02:59:05 +0000
+++ lava_test_shell/distro/fedora/lava-os-build	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-#!/bin/sh
-
-cat /etc/os-release | grep PRETTY_NAME | cut -d\" -f2

=== removed directory 'lava_test_shell/distro/ubuntu'
=== removed file 'lava_test_shell/distro/ubuntu/lava-install-packages'
--- lava_test_shell/distro/ubuntu/lava-install-packages	2013-06-06 14:47:36 +0000
+++ lava_test_shell/distro/ubuntu/lava-install-packages	1970-01-01 00:00:00 +0000
@@ -1,4 +0,0 @@ 
-#!/bin/sh
-
-DEBIAN_FRONTEND=noninteractive apt-get update
-DEBIAN_FRONTEND=noninteractive apt-get install -y -q "$@"

=== removed file 'lava_test_shell/distro/ubuntu/lava-installed-packages'
--- lava_test_shell/distro/ubuntu/lava-installed-packages	2013-04-11 19:50:48 +0000
+++ lava_test_shell/distro/ubuntu/lava-installed-packages	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-#!/bin/sh
-
-dpkg-query -W -f '${status} ${package} : ${version}\n' | sed -n 's/^install ok installed/package:/p'

=== removed file 'lava_test_shell/distro/ubuntu/lava-os-build'
--- lava_test_shell/distro/ubuntu/lava-os-build	2013-04-11 19:50:48 +0000
+++ lava_test_shell/distro/ubuntu/lava-os-build	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-#!/bin/sh
-
-cat /etc/lsb-release | grep DESCRIPTION | cut -d\" -f2

=== removed file 'lava_test_shell/lava-installed-packages'
--- lava_test_shell/lava-installed-packages	2013-04-11 19:50:48 +0000
+++ lava_test_shell/lava-installed-packages	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-#!/bin/sh
-
-echo 'Unsupported distro: cannot obtain list of installed packages'

=== removed file 'lava_test_shell/lava-os-build'
--- lava_test_shell/lava-os-build	2013-04-11 19:50:48 +0000
+++ lava_test_shell/lava-os-build	1970-01-01 00:00:00 +0000
@@ -1,3 +0,0 @@ 
-#!/bin/sh
-
-echo 'Unsupported distro: cannot determine build version'

=== removed file 'lava_test_shell/lava-test-case'
--- lava_test_shell/lava-test-case	2013-01-04 13:23:08 +0000
+++ lava_test_shell/lava-test-case	1970-01-01 00:00:00 +0000
@@ -1,81 +0,0 @@ 
-#NOTE the lava_test_shell_action fills in the proper interpreter path
-# above during target deployment
-
-usage () {
-    echo "Usage: lava-test-case TEST_CASE_ID --shell cmds ..."
-    echo "   or: lava-test-case TEST_CASE_ID --result RESULT [--units UNITS] "
-    echo "                                   [--measurement MEASUREMENT]"
-    echo ""
-    echo "Either run or record the results of a particular test case"
-}
-
-rc=0
-
-TEST_CASE_ID="$1"
-shift
-if [ -z "$TEST_CASE_ID" ]; then
-    usage
-    exit 1
-fi
-if [ "$1" = "--shell" ]; then
-    shift
-    echo "<LAVA_SIGNAL_STARTTC $TEST_CASE_ID>"
-    read
-    $*
-    rc=$?
-    echo "<LAVA_SIGNAL_ENDTC $TEST_CASE_ID>"
-    read
-    if [ $rc -eq 0 ]; then
-        RESULT=pass
-    else
-        RESULT=fail
-    fi
-else
-    while [ $# -gt 0 ]; do
-        case $1 in
-            --result)
-                shift
-                RESULT=$1
-                shift
-                ;;
-            --units)
-                shift
-                UNITS=$1
-                shift
-                ;;
-            --measurement)
-                shift
-                MEASUREMENT=$1
-                shift
-                ;;
-            *)
-                usage
-                exit 1
-                ;;
-        esac
-    done
-fi
-
-# $LAVA_RESULT_DIR is set by lava-test-shell
-result_dir="$LAVA_RESULT_DIR/results/$TEST_CASE_ID"
-mkdir -p "$result_dir"
-
-if [ -z "${RESULT+x}" ]; then
-    echo "--result must be specified"
-    exit 1
-else
-    echo $RESULT > $result_dir/result
-fi
-
-if [ -n "${UNITS+x}" ]; then
-    echo $UNITS > $result_dir/units
-fi
-
-if [ -n "${MEASUREMENT+x}" ]; then
-    echo $MEASUREMENT > $result_dir/measurement
-fi
-
-# lava-test-case testname --shell false should report a fail as test result
-# but not fail itself; hence don't honor 'rc' if we reach this, but exit 0
-exit 0
-

=== removed file 'lava_test_shell/lava-test-case-attach'
--- lava_test_shell/lava-test-case-attach	2013-01-04 13:23:08 +0000
+++ lava_test_shell/lava-test-case-attach	1970-01-01 00:00:00 +0000
@@ -1,45 +0,0 @@ 
-#NOTE the lava_test_shell_action fills in the proper interpreter path
-# above during target deployment
-
-# basename is not present on AOSP builds, but the /*\// thing does not
-# work with dash (Ubuntu builds) or busybox (OpenEmbedded).  Both of
-# those have basename though.
-type basename > /dev/null || basename () { echo ${1/*\//}; }
-
-usage () {
-    echo "Usage: lava-test-case-attach TEST_CASE_ID FILE [MIME_TYPE]"
-    echo ""
-    echo "Attach FILE to the test case TEST_CASE_ID."
-}
-
-if [ $# -ne 2 -a $# -ne 3 ]; then
-    usage
-    exit 1
-fi
-
-TEST_CASE_ID="$1"
-shift
-FILE="$1"
-shift
-MIMETYPE="$1"
-
-if [ -z "$FILE" ]; then
-    usage
-    exit 1
-fi
-if [ ! -f "$FILE" ]; then
-    echo "File $FILE not found"
-    exit 1
-fi
-if [ -z "$TEST_CASE_ID" ]; then
-    usage
-    exit 1
-fi
-
-# $LAVA_RESULT_DIR is set by lava-test-shell
-case_attachment_dir="$LAVA_RESULT_DIR/results/$TEST_CASE_ID/attachments"
-mkdir -p "$case_attachment_dir"
-cp "$FILE" "$case_attachment_dir"
-if [ ! -z "$MIMETYPE" ]; then
-    echo "$MIMETYPE" > "$case_attachment_dir/$(basename $FILE).mimetype"
-fi

=== removed file 'lava_test_shell/lava-test-run-attach'
--- lava_test_shell/lava-test-run-attach	2013-01-04 13:23:08 +0000
+++ lava_test_shell/lava-test-run-attach	1970-01-01 00:00:00 +0000
@@ -1,39 +0,0 @@ 
-#NOTE the lava_test_shell_action fills in the proper interpreter path
-# above during target deployment
-
-# basename is not present on AOSP builds, but the /*\// thing does not
-# work with dash (Ubuntu builds) or busybox (OpenEmbedded).  Both of
-# those have basename though.
-type basename > /dev/null || basename () { echo ${1/*\//}; }
-
-usage () {
-    echo "Usage: lava-test-run-attach FILE [MIME_TYPE]"
-    echo ""
-    echo "Attach FILE to the current test run."
-}
-
-if [ $# -ne 1 -a $# -ne 2 ]; then
-    usage
-    exit 1
-fi
-
-FILE="$1"
-shift
-MIMETYPE="$1"
-
-if [ -z "$FILE" ]; then
-    usage
-    exit 1
-fi
-if [ ! -f "$FILE" ]; then
-    echo "File $FILE not found"
-    exit 1
-fi
-
-# $LAVA_RESULT_DIR is set by lava-test-shell
-attachment_dir="$LAVA_RESULT_DIR/attachments"
-mkdir -p "$attachment_dir"
-cp "$FILE" "$attachment_dir"
-if [ ! -z "$MIMETYPE" ]; then
-    echo "$MIMETYPE" > "$attachment_dir/$(basename $FILE).mimetype"
-fi

=== removed file 'lava_test_shell/lava-test-runner'
--- lava_test_shell/lava-test-runner	2013-08-29 16:03:11 +0000
+++ lava_test_shell/lava-test-runner	1970-01-01 00:00:00 +0000
@@ -1,91 +0,0 @@ 
-#!/bin/bash
-
-PREFIX="<LAVA_TEST_RUNNER>:"
-WORKFILE="/lava/lava-test-runner.conf"
-RESULTSDIR="/lava/results"
-BINDIR="/lava/bin"
-
-hwcontext()
-{
-	mkdir -p ${RESULTSDIR}/hwcontext
-	cpuinfo=${RESULTSDIR}/hwcontext/cpuinfo.txt
-	meminfo=${RESULTSDIR}/hwcontext/meminfo.txt
-
-	[ -f ${cpuinfo} ] || cat /proc/cpuinfo > ${cpuinfo}
-	[ -f ${meminfo} ] || cat /proc/meminfo > ${meminfo}
-}
-
-swcontext()
-{
-	mkdir -p ${RESULTSDIR}/swcontext
-	build=${RESULTSDIR}/swcontext/build.txt
-	pkgs=${RESULTSDIR}/swcontext/pkgs.txt
-
-	lava-os-build > ${build}
-
-	# this has to print a list of installed packages that will look similar to
-	# what android's package list does
-	lava-installed-packages  > ${pkgs}
-}
-
-cleanup()
-{
-	# just adds a little handy debugging
-	ls ${RESULTSDIR}
-	echo "${PREFIX} calling sync"
-	sync
-	echo "${PREFIX} exiting"
-}
-trap cleanup INT TERM EXIT
-
-export PATH=${BINDIR}:${PATH}
-echo "${PREFIX} started"
-mkdir -p ${RESULTSDIR}
-
-# move the workfile to something timestamped and run that. This
-# prevents us from running the same thing again after a reboot
-TS=`date +%s`
-mv ${WORKFILE} ${WORKFILE}-${TS}
-WORKFILE=${WORKFILE}-${TS}
-
-echo "${PREFIX} looking for installation work in ${WORKFILE}"
-for line in $(cat ${WORKFILE}); do
-	test=`basename $line`
-	if [ -f ${line}/install.sh ] ; then
-		echo "${PREFIX} running ${test} installer ..."
-		/bin/sh ${line}/install.sh
-		if [ $? -ne 0 ] ; then
-			echo "${PREFIX} ${test} installer failed, exiting"
-			hwcontext
-			swcontext
-			exit 1
-		fi
-	fi
-done
-
-echo "${PREFIX} save hardware/software context info..."
-hwcontext
-swcontext
-
-echo "${PREFIX} looking for work in ${WORKFILE}"
-for line in $(cat ${WORKFILE}); do
-	test=`basename $line`
-	echo "${PREFIX} running ${test} under lava-test-shell..."
-	odir=${RESULTSDIR}/${test}-`date +%s`
-	mkdir ${odir}
-	mkdir ${odir}/attachments/
-	cp ${line}/uuid ${odir}/analyzer_assigned_uuid
-	cp ${line}/testdef.yaml ${odir}/
-	cp ${line}/testdef_metadata ${odir}/
-	cp ${line}/run.sh ${odir}/attachments/
-	echo 'text/plain' > ${odir}/attachments/run.sh.mimetype
-	if [ -f ${line}/install.sh ]; then
-	    cp ${line}/install.sh ${odir}/attachments/
-	    echo 'text/plain' > ${odir}/attachments/install.sh.mimetype
-	fi
-	# run.sh includes a "read -t <timeout>" which isn't supported by dash
-	# so be sure to use bash
-	lava-test-shell --output_dir ${odir} /bin/bash -e "${line}/run.sh"
-	echo "${PREFIX} ${test} exited with: `cat ${odir}/return_code`"
-done
-

=== removed file 'lava_test_shell/lava-test-shell'
--- lava_test_shell/lava-test-shell	2013-01-04 13:23:08 +0000
+++ lava_test_shell/lava-test-shell	1970-01-01 00:00:00 +0000
@@ -1,15 +0,0 @@ 
-#NOTE the lava_test_shell_action fills in the proper interpreter path
-# above during target deployment
-
-shift
-ODIR=$1
-shift
-TEST=$*
-export LAVA_RESULT_DIR=${ODIR}
-{
-	$TEST
-	echo $? > ${ODIR}/return_code
-} 2>&1 | while read line; do
-	echo "$line"
-	echo "$line" >> ${ODIR}/stdout.log
-done

=== removed directory 'lava_test_shell/multi_node'
=== removed file 'lava_test_shell/multi_node/lava-group'
--- lava_test_shell/multi_node/lava-group	2013-08-26 08:33:13 +0000
+++ lava_test_shell/multi_node/lava-group	1970-01-01 00:00:00 +0000
@@ -1,19 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#
-#This command will produce in its standard output a representation of the
-#device group that is participating in the multi-node test job.
-#
-#Usage: ``lava-group``
-#
-#The output format contains one line per device, and each line contains
-#the hostname and the role that device is playing in the test, separated
-#by a TAB character::
-#
-#	panda01	client
-#	highbank01	loadbalancer
-#	highbank02	backend
-#	highbank03	backend
-
-printf ${LAVA_GROUP}

=== removed file 'lava_test_shell/multi_node/lava-multi-node.lib'
--- lava_test_shell/multi_node/lava-multi-node.lib	2013-08-26 16:30:25 +0000
+++ lava_test_shell/multi_node/lava-multi-node.lib	1970-01-01 00:00:00 +0000
@@ -1,210 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#
-
-MESSAGE_PREFIX="<LAVA_MULTI_NODE>"
-MESSAGE_COMMAND="<${LAVA_MULTI_NODE_API}"
-MESSAGE_HEAD="$MESSAGE_PREFIX $MESSAGE_COMMAND"
-#MESSAGE_ID="<$1>"
-MESSAGE_ACK="<${LAVA_MULTI_NODE_API}_ACK>"
-
-MESSAGE_REPLY="<${LAVA_MULTI_NODE_API}_COMPLETE"
-MESSAGE_REPLY_ACK="<${LAVA_MULTI_NODE_API}_COMPLETE_ACK>"
-
-LAVA_MULTI_NODE_EXIT_ERROR=1
-
-_get_key_value_pattern () {
-	echo $@|\
-	tr ' ' '\n' |\
-	sed -n '/\b\w\w*[=]\w\w*\b/p'|\
-	tr '\n' ' '
-}
-
-_lava_multi_node_debug () {
-
-if [ -n "$LAVA_MULTI_NODE_DEBUG" ] ; then
-	echo "${MESSAGE_COMMAND}_DEBUG $@ $(date)>"
-fi
-
-}
-
-_lava_multi_node_send () {
-
-_lava_multi_node_debug "$FUNCNAME started"
-
-result=$(echo $1 | grep "..*=..*")
-
-if [ -n "$1" -a "${result}x" = "x" ] ; then
-	echo ${MESSAGE_HEAD} $@">"
-else
-	_lava_multi_node_debug "$FUNCNAME error messageID : " "$result"
-	exit $LAVA_MULTI_NODE_EXIT_ERROR
-fi
-
-_lava_multi_node_debug "$FUNCNAME finished"
-
-}
-
-_lava_multi_node_process_message () {
-
-_lava_multi_node_debug "$FUNCNAME save message to $LAVA_MULTI_NODE_CACHE"
-#clean old cache file
-rm $LAVA_MULTI_NODE_CACHE 2>/dev/null
-
-until [ -z "$1" ] ; do
-	result=$(echo $1 | grep "..*=..*")
-	if [ "${result}x" != "x" ] ; then
-		echo $1 >> $LAVA_MULTI_NODE_CACHE
-	elif [ "${1}x" = "nackx" ] ; then
-		echo "Error:no-response $1, Exit from $LAVA_MULTI_NODE_API!"
-		exit $LAVA_MULTI_NODE_EXIT_ERROR
-	else
-		echo "Warning:unrecognized message $1"
-	fi
-	shift
-done
-}
-
-lava_multi_node_send () {
-
-_lava_multi_node_debug "$FUNCNAME preparing"
-
-_lava_multi_node_send $@
-
-while [ -n "$MESSAGE_NEED_ACK" -a "${SHELL}x" = "/bin/bashx" ] ; do
-_lava_multi_node_debug "$FUNCNAME waiting for ack"
-	read -t $MESSAGE_TIMEOUT line
-	result=$(echo $line | grep "${MESSAGE_ACK}")
-	if [ "${result}x" != "x" ] ; then
-#		echo ${MESSAGE_ACK}
-		break
-	fi
-	_lava_multi_node_send $@
-done
-
-_lava_multi_node_debug "$FUNCNAME finished"
-
-}
-
-lava_multi_node_wait_for_signal () {
-
-_lava_multi_node_debug "$FUNCNAME starting to wait"
-
-while read line; do
-	result=$(echo $line | grep "${MESSAGE_REPLY}>")
-	if [ "${result}x" != "x" ] ; then
-		if [ -n "$MESSAGE_NEED_ACK" ] ; then
-			echo ${MESSAGE_REPLY_ACK}
-		fi
-		break
-	fi
-done
-
-_lava_multi_node_debug "$FUNCNAME waiting over"
-
-}
-
-lava_multi_node_wait_for_message () {
-
-_lava_multi_node_debug "$FUNCNAME starting to wait"
-
-if [ -n "$1" ] ; then
-	export LAVA_MULTI_NODE_CACHE=$1
-fi
-
-while read line; do
-	result=$(echo $line | grep "${MESSAGE_REPLY}")
-	if [ "${result}x" != "x" ] ; then
-		line=${line##*${MESSAGE_REPLY}}
-		_lava_multi_node_process_message ${line%%>*}
-		if [ -n "$MESSAGE_NEED_ACK" ] ; then
-			echo ${MESSAGE_REPLY_ACK}
-		fi
-		break
-	fi
-done
-
-_lava_multi_node_debug "$FUNCNAME waiting over"
-
-}
-
-lava_multi_node_get_network_info () {
-
-_NETWORK_INTERFACE=$1
-_RAW_STREAM_V4=`ifconfig $_NETWORK_INTERFACE |grep "inet "`
-_RAW_STREAM_V6=`ifconfig $_NETWORK_INTERFACE |grep "inet6 "`
-_RAW_STREAM_MAC=`ifconfig $_NETWORK_INTERFACE |grep "ether "`
-
-_IPV4_STREAM_IP=`echo $_RAW_STREAM_V4 | cut -f2 -d" "`
-_IPV4_STREAM_NM=`echo $_RAW_STREAM_V4 | cut -f4 -d" "`
-_IPV4_STREAM_BC=`echo $_RAW_STREAM_V4 | cut -f6 -d" "`
-_IPV4_STREAM="ipv4="$_IPV4_STREAM_IP" netmask="$_IPV4_STREAM_NM" \
-broadcast="$_IPV4_STREAM_BC
-
-_IPV6_STREAM_IP=`echo $_RAW_STREAM_V6 | cut -f2 -d" "`
-_IPV6_STREAM="ipv6="$_IPV6_STREAM_IP
-
-_MAC_STREAM="mac="`echo $_RAW_STREAM_MAC | cut -f2 -d" "`
-
-_HOSTNAME_STREAM="hostname="`hostname`
-
-_HOSTNAME_FULL_STREAM="hostname-full="`hostname -f`
-
-_DEF_GATEWAY_STREAM="default-gateway="`route -n |grep "UG "|  cut -f10 -d" "`
-
-#get DNS configure
-_Counter=1
-for line in `cat /etc/resolv.conf | grep "nameserver"| cut -d " " -f 2` ; do
-	export _DNS_${_Counter}_STREAM=$line
-	_Counter=`expr ${_Counter} + 1`
-done
-_DNS_STREAM="dns_1=${_DNS_1_STREAM} dns_2=${_DNS_2_STREAM} \
-dns_3=${_DNS_3_STREAM}"
-
-_get_key_value_pattern $_IPV4_STREAM $_IPV6_STREAM $_MAC_STREAM \
-$_HOSTNAME_STREAM $_HOSTNAME_FULL_STREAM $_DEF_GATEWAY_STREAM $_DNS_STREAM
-
-}
-
-lava_multi_node_check_cache () {
-
-if [ -n "$1" ] ; then
-	export LAVA_MULTI_NODE_CACHE=$1
-fi
-
-if [ ! -f $LAVA_MULTI_NODE_CACHE ] ; then
-	_lava_multi_node_debug "$FUNCNAME not cache file $LAVA_MULTI_NODE_CACHE !"
-	exit $LAVA_MULTI_NODE_EXIT_ERROR
-fi
-
-}
-
-lava_multi_node_print_host_info () {
-
-_HOSTNAME=$1
-_INFO=$2
-_RAW_STREAM=`cat $LAVA_MULTI_NODE_NETWORK_CACHE |grep "$_HOSTNAME:$_INFO="`
-
-if [ -n "$_RAW_STREAM" ] ; then
-	echo $_RAW_STREAM|cut -d'=' -f2
-fi
-
-}
-
-lava_multi_node_make_hosts () {
-
-for line in `grep ":ipv4" $LAVA_MULTI_NODE_NETWORK_CACHE` ; do
-	_IP_STREAM=`echo $line | cut -d'=' -f2`
-	_TARGET_STREAM=`echo $line | cut -d':' -f1`
-	_HOSTNAME_STREAM=`grep "$_TARGET_STREAM:hostname=" \
-$LAVA_MULTI_NODE_NETWORK_CACHE | cut -d'=' -f2`
-	if [ -n "$_HOSTNAME_STREAM" ]; then
-		printf "$_IP_STREAM\t$_HOSTNAME_STREAM\n" >> $1
-	else
-		printf "$_IP_STREAM\t$_TARGET_STREAM\n" >> $1
-	fi
-done
-
-}
-

=== removed file 'lava_test_shell/multi_node/lava-network'
--- lava_test_shell/multi_node/lava-network	2013-08-26 08:33:13 +0000
+++ lava_test_shell/multi_node/lava-network	1970-01-01 00:00:00 +0000
@@ -1,104 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#lava-network
-#-----------------
-#Helper script to broadcast IP data from the test image, wait for data
-#to be received by the rest of the group (or one role within the group)
-#and then provide an interface to retrieve IP data about the group on
-#the command line.
-#
-#Raising a suitable network interface is a job left for the designer of
-#the test definition / image but once a network interface is available,
-#lava-network can be asked to broadcast this information to the rest of
-#the group. At a later stage of the test, before the IP details of the
-#group need to be used, call lava-network collect to receive the same
-#information about the rest of the group.
-#
-#All usage of lava-network needs to use a broadcast (which wraps a call
-#to lava-send) and a collect (which wraps a call to lava-wait-all). As
-#a wrapper around lava-wait-all, collect will block until the rest of
-#the group (or devices in the group with the specified role) has made a
-#broadcast.
-#
-#After the data has been collected, it can be queried for any board
-#specified in the output of lava-group:
-#
-#lava-network query server
-#192.168.3.56
-#
-#Usage:
-#	broadcast network info:
-#		lava-network broadcast [interface]
-#	collect network info:
-#		lava-network collect [interface] <role>
-#	query specific host info:
-#		lava-network query [hostname] [info]
-#	export hosts file:
-#		lava-network hosts [path of hosts]
-#
-#So interface would be mandatory for broadcast and collect, hostname
-#would be mandatory for query, "path of hosts" would be mandatory for
-#hosts, role is optional for collect.
-
-
-LAVA_MULTI_NODE_API="LAVA_NETWORK"
-#MESSAGE_TIMEOUT=5
-#MESSAGE_NEED_ACK=yes
-
-_LAVA_NETWORK_ID="network_info"
-_LAVA_NETWORK_ARG_MIN=2
-
-. $LAVA_TEST_BIN/lava-multi-node.lib
-
-LAVA_MULTI_NODE_NETWORK_CACHE="/tmp/lava_multi_node_network_cache.txt"
-
-_lava_multi_node_debug "$LAVA_MULTI_NODE_API checking arguments..."
-if [ $# -lt $_LAVA_NETWORK_ARG_MIN ]; then
-	_lava_multi_node_debug "$FUNCNAME Not enough arguments."
-	exit $LAVA_MULTI_NODE_EXIT_ERROR
-fi
-
-_lava_multi_node_debug "$LAVA_MULTI_NODE_API handle sub-command..."
-case "$1" in
-	"broadcast")
-	_lava_multi_node_debug "$LAVA_MULTI_NODE_API handle broadcast command..."
-	LAVA_MULTI_NODE_API="LAVA_SEND"
-	MESSAGE_COMMAND="<${LAVA_MULTI_NODE_API}"
-	export MESSAGE_ACK="<${LAVA_MULTI_NODE_API}_ACK>"
-	export MESSAGE_REPLY="<${LAVA_MULTI_NODE_API}_COMPLETE"
-	export MESSAGE_REPLY_ACK="<${LAVA_MULTI_NODE_API}_COMPLETE_ACK>"
-	export MESSAGE_HEAD="$MESSAGE_PREFIX $MESSAGE_COMMAND"
-	NETWORK_INFO_STREAM=`lava_multi_node_get_network_info $2`
-	lava_multi_node_send $_LAVA_NETWORK_ID $NETWORK_INFO_STREAM
-	;;
-
-	"collect")
-	_lava_multi_node_debug "$LAVA_MULTI_NODE_API handle collect command..."
-	LAVA_MULTI_NODE_API="LAVA_WAIT_ALL"
-	MESSAGE_COMMAND="<${LAVA_MULTI_NODE_API}"
-	export MESSAGE_ACK="<${LAVA_MULTI_NODE_API}_ACK>"
-	export MESSAGE_REPLY="<${LAVA_MULTI_NODE_API}_COMPLETE"
-	export MESSAGE_REPLY_ACK="<${LAVA_MULTI_NODE_API}_COMPLETE_ACK>"
-	export MESSAGE_HEAD="$MESSAGE_PREFIX $MESSAGE_COMMAND"
-	lava_multi_node_send $_LAVA_NETWORK_ID $3 
-	lava_multi_node_wait_for_message $LAVA_MULTI_NODE_NETWORK_CACHE
-	;;
-
-	"query")
-	_lava_multi_node_debug "$LAVA_MULTI_NODE_API handle query command..."
-	lava_multi_node_check_cache $LAVA_MULTI_NODE_NETWORK_CACHE
-	lava_multi_node_print_host_info $2 $3
-	;;
-
-	"hosts")
-	_lava_multi_node_debug "$LAVA_MULTI_NODE_API handle hosts command..."
-	lava_multi_node_check_cache $LAVA_MULTI_NODE_NETWORK_CACHE
-	lava_multi_node_make_hosts $2
-	;;
-
-	*)
-	_lava_multi_node_debug "$LAVA_MULTI_NODE_API command $1 is not supported."
-	exit $LAVA_MULTI_NODE_EXIT_ERROR
-	;;
-esac

=== removed file 'lava_test_shell/multi_node/lava-role'
--- lava_test_shell/multi_node/lava-role	2013-06-21 13:23:41 +0000
+++ lava_test_shell/multi_node/lava-role	1970-01-01 00:00:00 +0000
@@ -1,14 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#
-#Prints the role the current device is playing in a multi-node job.
-#
-#Usage: ``lava-role``
-#
-#*Example.* In a directory with several scripts, one for each role
-#involved in the test::
-#
-#    $ ./run-`lava-role`.sh
-
-echo ${TARGET_ROLE}

=== removed file 'lava_test_shell/multi_node/lava-self'
--- lava_test_shell/multi_node/lava-self	2013-08-26 08:33:13 +0000
+++ lava_test_shell/multi_node/lava-self	1970-01-01 00:00:00 +0000
@@ -1,9 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#
-#Prints the name of the current device.
-#
-#Usage: ``lava-self``
-
-echo ${LAVA_HOSTNAME}

=== removed file 'lava_test_shell/multi_node/lava-send'
--- lava_test_shell/multi_node/lava-send	2013-08-26 08:33:13 +0000
+++ lava_test_shell/multi_node/lava-send	1970-01-01 00:00:00 +0000
@@ -1,17 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#
-#Sends a message to the group, optionally passing associated key-value
-#data pairs. Sending a message is a non-blocking operation. The message
-#is guaranteed to be available to all members of the group, but some of
-#them might never retrieve it.
-#
-#Usage: ``lava-send <message-id> [key1=val1 [key2=val2] ...]``
-LAVA_MULTI_NODE_API="LAVA_SEND"
-#MESSAGE_TIMEOUT=5
-#MESSAGE_NEED_ACK=yes
-
-. $LAVA_TEST_BIN/lava-multi-node.lib
-
-lava_multi_node_send $1 $(_get_key_value_pattern $@)

=== removed file 'lava_test_shell/multi_node/lava-sync'
--- lava_test_shell/multi_node/lava-sync	2013-08-26 08:33:13 +0000
+++ lava_test_shell/multi_node/lava-sync	1970-01-01 00:00:00 +0000
@@ -1,20 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#
-#Global synchronization primitive. Sends a message, and waits for the
-#same message from all of the other devices.
-#
-#Usage: ``lava-sync <message>``
-#
-#``lava-sync foo`` is effectively the same as ``lava-send foo`` followed
-#by ``lava-wait-all foo``.
-LAVA_MULTI_NODE_API="LAVA_SYNC"
-#MESSAGE_TIMEOUT=5
-#MESSAGE_NEED_ACK=yes
-
-. $LAVA_TEST_BIN/lava-multi-node.lib
-
-lava_multi_node_send $1
-
-lava_multi_node_wait_for_message

=== removed file 'lava_test_shell/multi_node/lava-wait'
--- lava_test_shell/multi_node/lava-wait	2013-08-26 08:33:13 +0000
+++ lava_test_shell/multi_node/lava-wait	1970-01-01 00:00:00 +0000
@@ -1,21 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#
-#Waits until any other device in the group sends a message with the given
-#ID. This call will block until such message is sent.
-#
-#Usage: ``lava-wait <message-id>``
-#
-#If there was data passed in the message, the key-value pairs will be
-#printed in the standard output, each in one line. If no key values were
-#passed, nothing is printed.
-LAVA_MULTI_NODE_API="LAVA_WAIT"
-#MESSAGE_TIMEOUT=5
-#MESSAGE_NEED_ACK=yes
-
-. $LAVA_TEST_BIN/lava-multi-node.lib
-
-lava_multi_node_send $1
-
-lava_multi_node_wait_for_message

=== removed file 'lava_test_shell/multi_node/lava-wait-all'
--- lava_test_shell/multi_node/lava-wait-all	2013-08-26 08:33:13 +0000
+++ lava_test_shell/multi_node/lava-wait-all	1970-01-01 00:00:00 +0000
@@ -1,23 +0,0 @@ 
-#!/bin/sh
-#
-#This file is for Multi-Node test
-#
-#Waits until **all** other devices in the group send a message with the
-#given message ID. IF ``<role>`` is passed, only wait until all devices
-#with that given role send a message.
-#
-#``lava-wait-all <message-id> [<role>]``
-#
-#If data was sent by the other devices with the message, the key-value
-#pairs will be printed one per line, prefixed with the device name and
-#whitespace.
-LAVA_MULTI_NODE_API="LAVA_WAIT_ALL"
-#MESSAGE_TIMEOUT=5
-#MESSAGE_NEED_ACK=yes
-
-. $LAVA_TEST_BIN/lava-multi-node.lib
-
-lava_multi_node_send $1 $2
-
-lava_multi_node_wait_for_message
-

=== removed file 'requirements.txt'
--- requirements.txt	2013-07-03 10:17:15 +0000
+++ requirements.txt	1970-01-01 00:00:00 +0000
@@ -1,7 +0,0 @@ 
-django
-django-openid-auth
-pexpect
-python-openid
-lockfile
-python-daemon
-setproctitle

=== removed file 'setup.cfg'
--- setup.cfg	2012-02-07 18:54:49 +0000
+++ setup.cfg	1970-01-01 00:00:00 +0000
@@ -1,2 +0,0 @@ 
-[upload]
-sign=True

=== removed file 'setup.py'
--- setup.py	2013-09-05 21:53:57 +0000
+++ setup.py	1970-01-01 00:00:00 +0000
@@ -1,63 +0,0 @@ 
-#!/usr/bin/env python
-
-from setuptools import setup, find_packages
-
-setup(
-    name="lava-dispatcher",
-    version=":versiontools:lava_dispatcher:",
-    url='https://launchpad.net/lava-dispatcher',
-    license='GPL v2 or later',
-    description="Part of the LAVA framework for dispatching test jobs",
-    author='Linaro Validation Team',
-    author_email='linaro-validation@lists.linaro.org',
-    namespace_packages=['lava'],
-    entry_points="""
-    [lava.commands]
-    dispatch = lava.dispatcher.commands:dispatch
-    connect = lava.dispatcher.commands:connect
-    devices = lava.dispatcher.commands:devices
-    power-cycle = lava.dispatcher.commands:power_cycle
-
-    [lava.signal_handlers]
-    add-duration = lava_dispatcher.signals.duration:AddDuration
-    arm-probe = lava_dispatcher.signals.armprobe:ArmProbe
-    shell-hooks = lava_dispatcher.signals.shellhooks:ShellHooks
-    """,
-    packages=find_packages(),
-    package_data={
-        'lava_dispatcher': [
-            'default-config/lava-dispatcher/lava-dispatcher.conf',
-            'default-config/lava-dispatcher/lava-dispatcher.conf',
-            'default-config/lava-dispatcher/device-defaults.conf',
-            'default-config/lava-dispatcher/device-types/*.conf',
-            'default-config/lava-dispatcher/devices/*.conf',
-        ],
-    },
-    data_files=[
-        ('lava_test_shell', [
-            'lava_test_shell/lava-test-case',
-            'lava_test_shell/lava-test-case-attach',
-            'lava_test_shell/lava-test-run-attach',
-            'lava_test_shell/lava-test-runner-android',
-            'lava_test_shell/lava-test-runner-ubuntu',
-            'lava_test_shell/lava-test-shell',
-            ])
-    ],
-    install_requires=[
-        'json-schema-validator >= 2.3',
-        'lava-tool >= 0.4',
-        'lava-utils-interface',
-        'linaro-dashboard-bundle',
-        'pexpect >= 2.3',
-        'configglue',
-        'PyYAML',
-        'versiontools >= 1.8',
-        'pyserial >= 2.6',
-    ],
-    setup_requires=[
-        'versiontools >= 1.8',
-    ],
-    scripts=[
-        'lava-dispatch'
-    ],
-)