
[Branch ~linaro-validation/lava-test/trunk] Rev 134: Merge two fix-annoyances branches with lots of changes:

Message ID 20120312132115.3494.43441.launchpad@ackee.canonical.com
State Accepted

Commit Message

Zygmunt Krynicki March 12, 2012, 1:21 p.m. UTC
Merge authors:
  Zygmunt Krynicki (zkrynicki)
Related merge proposals:
  https://code.launchpad.net/~zkrynicki/lava-test/fix-annoyances-2/+merge/96912
  proposed by: Zygmunt Krynicki (zkrynicki)
  https://code.launchpad.net/~zkrynicki/lava-test/fix-annoyances/+merge/96638
  proposed by: Zygmunt Krynicki (zkrynicki)
  review: Approve - Le Chi Thu (le-chi-thu)
------------------------------------------------------------
revno: 134 [merge]
committer: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
branch nick: trunk
timestamp: Mon 2012-03-12 14:18:46 +0100
message:
  Merge two fix-annoyances branches with lots of changes:
  
   * PEP8 fixes
   * Logging fixes
   * Consistency changes
   * Some command changes (simplifications)
modified:
  doc/changes.rst
  lava_test/api/core.py
  lava_test/api/delegates.py
  lava_test/commands.py
  lava_test/core/config.py
  lava_test/core/installers.py
  lava_test/core/loader.py
  lava_test/core/providers.py
  lava_test/core/runners.py
  lava_test/core/swprofile.py
  lava_test/core/tests.py
  lava_test/main.py
  lava_test/test_definitions/bluetooth-enablement.py
  lava_test/test_definitions/bootchart.py
  lava_test/test_definitions/firefox.py
  lava_test/test_definitions/glmemperf.py
  lava_test/test_definitions/gmpbench.py
  lava_test/test_definitions/gtkperf.py
  lava_test/test_definitions/insanity.py
  lava_test/test_definitions/leb-basic-graphics.py
  lava_test/test_definitions/ltp.py
  lava_test/test_definitions/lttng.py
  lava_test/test_definitions/peacekeeper.py
  lava_test/test_definitions/peacekeeper/peacekeeper_runner.py
  lava_test/test_definitions/perf.py
  lava_test/test_definitions/posixtestsuite.py
  lava_test/test_definitions/pwrmgmt.py
  lava_test/test_definitions/pybench.py
  lava_test/test_definitions/smem.py
  lava_test/test_definitions/stream.py
  lava_test/test_definitions/tiobench.py
  lava_test/test_definitions/wifi-enablement.py
  lava_test/test_definitions/x11perf.py
  lava_test/test_definitions/xrestop.py
  lava_test/utils.py


--
lp:lava-test
https://code.launchpad.net/~linaro-validation/lava-test/trunk


Patch

=== modified file 'doc/changes.rst'
--- doc/changes.rst	2012-03-12 07:55:04 +0000
+++ doc/changes.rst	2012-03-12 13:18:46 +0000
@@ -19,6 +19,10 @@ 
  * Add new render-bench test
  * Add spandex-gles2 test
  * Add e2eaudiotest test 
+ * Document :meth:`lava_test.api.core.ITest.run()` and
+   :attr:`lava_test.api.core.ITest.test_id`. This change fixes
+   https://bugs.launchpad.net/lava-test/+bug/919268
+ * Remove a lot of PEP8 issues from the code
 
 .. _version_0_4:
 

=== modified file 'lava_test/api/core.py'
--- lava_test/api/core.py	2011-09-12 09:19:10 +0000
+++ lava_test/api/core.py	2012-03-08 18:01:17 +0000
@@ -26,6 +26,7 @@ 
 
 from lava_test.api import _Interface
 
+
 class ITest(_Interface):
     """
     Abstract test definition.
@@ -49,6 +50,12 @@ 
         .. versionadded:: 0.2
         """
 
+    @abstractproperty
+    def test_id(self):
+        """
+        The unique name of this test
+        """
+
     @abstractmethod
     def install(self, observer):
         """
@@ -62,7 +69,8 @@ 
         :param observer:
             Observer object that makes it possible to monitor the actions
             performed by the test installer.
-        :type observer: :class:`~lava_test.api.observers.ITestInstallerObserver`
+        :type observer:
+            :class:`~lava_test.api.observers.ITestInstallerObserver`
 
         .. versionadded:: 0.2
         """
@@ -81,18 +89,24 @@ 
         """
 
     @abstractmethod
-    def run(self, observer):
+    def run(self, observer, test_options):
         """
         Run the test program and store artifacts.
-        
+
         :param observer:
             Observer object that makes it possible to monitor the actions
             performed by the test runner.
-        :type observer: :class:`~lava_test.api.observers.ITestRunnerObserver` 
-        :return: Test run artifacts
-        :rtype: :class:`~lava_test.core.artifacts.TestArtifacts`.
+        :type observer: :class:`~lava_test.api.observers.ITestRunnerObserver`
+        :param test_options:
+            Arbitrary string that was provided by the user.
+        :type test_options: :class:`str`
+        :return: Pair with test run artifacts and a run-failure flag
+        :rtype:
+            :class:`~lava_test.core.artifacts.TestArtifacts` and :class:`bool`
 
         .. versionadded:: 0.2
+        .. versionchanged:: 0.3.1
+            Added the test_options argument; the return value is now a tuple
         """
 
     @abstractmethod
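
The hunk above changes the ITest contract: run() now also receives the raw
test_options string and returns an (artifacts, run_failed) pair, and test_id
becomes an abstract property. A minimal sketch of the new contract, using a
made-up ExampleTest class and placeholder artifacts rather than the real
TestArtifacts:

    class ExampleTest(object):
        """Hypothetical implementation following the updated ITest API."""

        @property
        def test_id(self):
            # The unique name of this test, now part of the public API.
            return "example-test"

        def run(self, observer, test_options):
            # test_options is the string given via `lava-test run -t ...`;
            # the returned pair mirrors the (artifacts, run_fail) tuple that
            # lava_test/commands.py unpacks.
            artifacts = {"test_id": self.test_id, "options": test_options}
            run_failed = False
            return artifacts, run_failed

    artifacts, run_failed = ExampleTest().run(None, test_options="firefox")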

=== modified file 'lava_test/api/delegates.py'
--- lava_test/api/delegates.py	2011-10-13 10:23:18 +0000
+++ lava_test/api/delegates.py	2012-03-08 18:04:02 +0000
@@ -47,7 +47,8 @@ 
         :param observer:
             Observer object that makes it possible to monitor the actions
             performed by the test installer.
-        :type observer: :class:`~lava_test.api.observers.ITestInstallerObserver`
+        :type observer:
+            :class:`~lava_test.api.observers.ITestInstallerObserver`
 
         .. versionadded:: 0.2
         """
@@ -116,7 +117,8 @@ 
         """
         Results dictionary to be merged with TestRun object inside the bundle.
 
-        .. seealso:: :meth:`~lava_test.core.artifacts.TestArtifacts.incorporate_parse_results`
+        .. seealso::
+            :meth:`~lava_test.core.artifacts.TestArtifacts.incorporate_parse_results`
 
         .. versionadded:: 0.1
         """

=== modified file 'lava_test/commands.py'
--- lava_test/commands.py	2012-03-10 13:21:53 +0000
+++ lava_test/commands.py	2012-03-08 19:17:05 +0000
@@ -138,7 +138,6 @@ 
                 self.say("No tests installed")
 
 
-
 class TestAffectingCommand(Command):
 
     INSTALL_REQUIRED = False
@@ -191,76 +190,89 @@ 
     def register_arguments(cls, parser):
         super(run, cls).register_arguments(parser)
         group = parser.add_argument_group("initial bundle configuration")
-        group.add_argument("-S", "--skip-software-context",
-                            default=False,
-                            action="store_true",
-                           help=("Do not store the software context in the"
-                                 " initial bundle. Typically this saves OS"
-                                 " image name and all the installed software"
-                                 " packages."))
-        group.add_argument("-H", "--skip-hardware-context",
-                            default=False,
-                            action="store_true",
-                           help=("Do not store the hardware context in the"
-                                 " initial bundle. Typically this saves CPU,"
-                                 " memory and USB device information."))
-        group.add_argument("--trusted-time",
-                            default=False,
-                            action="store_true",
-                            help=("Indicate that the real time clock has"
-                                  " accurate data. This can differentiate"
-                                  " test results created on embedded devices"
-                                  " that often have inaccurate real time"
-                                  " clock settings."))
-        group.add_argument("--analyzer-assigned-uuid",
-                           default=None,
-                           metavar="UUID",
-                           help=("Set the analyzer_assigned_uuid to the specified value."
-                                 " This will prevent the test device from attempting"
-                                 " to generate an UUID by itself. This option may be"
-                                 " required if the test device has unreliable real"
-                                 " time clock (no battery backed, not ensure to be"
-                                 " up-to-date) and unreliable/random hardware ethernet "
-                                 " address."))
+        group.add_argument(
+            "-S", "--skip-software-context",
+            default=False,
+            action="store_true",
+            help=("Do not store the software context in the"
+                  " initial bundle. Typically this saves OS"
+                  " image name and all the installed software"
+                  " packages."))
+        group.add_argument(
+            "-H", "--skip-hardware-context",
+            default=False,
+            action="store_true",
+            help=("Do not store the hardware context in the"
+                  " initial bundle. Typically this saves CPU,"
+                  " memory and USB device information."))
+        group.add_argument(
+            "--trusted-time",
+            default=False,
+            action="store_true",
+            help=("Indicate that the real time clock has"
+                  " accurate data. This can differentiate"
+                  " test results created on embedded devices"
+                  " that often have inaccurate real time"
+                  " clock settings."))
+        group.add_argument(
+            "--analyzer-assigned-uuid",
+            default=None,
+            metavar="UUID",
+            help=("Set the analyzer_assigned_uuid to the specified value."
+                  " This will prevent the test device from attempting"
+                  " to generate an UUID by itself. This option may be"
+                  " required if the test device has unreliable real"
+                  " time clock (no battery backed, not ensure to be"
+                  " up-to-date) and unreliable/random hardware ethernet "
+                  " address."))
 
         group = parser.add_argument_group("complete bundle configuration")
-        group.add_argument("-o", "--output",
-                            default=None,
-                            metavar="FILE",
-                           help=("After running the test parse the result"
-                                 " artifacts, fuse them with the initial"
-                                 " bundle and finally save the complete bundle"
-                                 " to the  specified FILE."))
-        group.add_argument("-A", "--skip-attachments",
-                            default=False,
-                            action="store_true",
-                            help=("Do not store standard output and standard"
-                                  " error log files as attachments. This"
-                                  " option is only affecting the bundle"
-                                  " created with --output, the initial bundle"
-                                  " is not affected as it never stores any"
-                                  " attachments."))
-
-        parser.add_argument("-t", "--test-options",
-                            default=None,
-                            help="Override the default test options. "
-                                 "The $(OPTIONS) in the run steps will be replaced by the options."
-                                 "See peacekeeper.py as example how to use this feature."
-                                 "To provide multiple options, use quote character."
-                                 "Example : lava-test run peacekeeper -t firefox.  "
-                                 "Example of multiple options : lava-test run foo_test -t 'arg1 arg2'")
+        group.add_argument(
+            "-o", "--output",
+            default=None,
+            metavar="FILE",
+            help=("After running the test parse the result"
+                  " artifacts, fuse them with the initial"
+                  " bundle and finally save the complete bundle"
+                  " to the  specified FILE."))
+        group.add_argument(
+            "-A", "--skip-attachments",
+            default=False,
+            action="store_true",
+            help=("Do not store standard output and standard"
+                  " error log files as attachments. This"
+                  " option is only affecting the bundle"
+                  " created with --output, the initial bundle"
+                  " is not affected as it never stores any"
+                  " attachments."))
+
+        parser.add_argument(
+            "-t", "--test-options",
+            default=None,
+            help=(
+                "Override the default test options."
+                " The value is passed verbatim to test definition. Typically"
+                " this is simply used in shell commands by expanding the"
+                " string $(OPTIONS). Please refer to the built-in"
+                " peacekeeper.py for examples. Depending on your shell you"
+                " probably have to escape spaces and other special"
+                " characters if you wish to include them in your argument"
+                " options."))
+
     def invoke_with_test(self, test):
         # Validate analyzer_assigned_uuid
         if self.args.analyzer_assigned_uuid:
             import uuid
             try:
-                self.analyzer_assigned_uuid=str(uuid.UUID(self.args.analyzer_assigned_uuid))
+                self.analyzer_assigned_uuid = str(
+                    uuid.UUID(self.args.analyzer_assigned_uuid))
             except ValueError as exc:
                 self.parser.error("--analyzer-assigned-uuid: %s" % exc)
         if not test.is_installed:
             raise LavaCommandError("The specified test is not installed")
         try:
-            artifacts, run_fail = test.run(self, test_options=self.args.test_options)
+            artifacts, run_fail = test.run(
+                self, test_options=self.args.test_options)
         except subprocess.CalledProcessError as ex:
             if ex.returncode is None:
                 raise LavaCommandError("Command %r was aborted" % ex.cmd)
@@ -413,25 +425,28 @@ 
         except KeyError:
             raise LavaCommandError("There is no test_url")
 
+
 class unregister_test(Command):
     """
-    Unregister remote test
+    Remove a declarative test from the registry.
+    This command does the reverse of `lava-test register-test`: pass the
+    same URL you used when registering the test.
     """
 
     @classmethod
     def register_arguments(cls, parser):
         super(unregister_test, cls).register_arguments(parser)
-        parser.add_argument("test_url",
-                            help="Url for test definition file")
+        parser.add_argument(
+            "url",
+            metavar="URL",
+            help="URL of the test definition file")
 
     def invoke(self):
+        from lava_test.core.providers import RegistryProvider
         try:
-            from lava_test.core.providers import RegistryProvider
-            RegistryProvider.unregister_remote_test(self.args.test_url)
-        except ValueError as exc:
-            raise LavaCommandError("Unable to unregister test: %s" % exc)
-        except KeyError:
-            raise LavaCommandError("There is no test_url")
+            RegistryProvider.unregister_remote_test(self.args.url)
+        except ValueError:
+            raise LavaCommandError("This test is not registered")
 
 
 class reset(Command):
@@ -449,4 +464,4 @@ 
         shutil.rmtree(self._config.resultsdir, ignore_errors=True)
         cache = Cache.get_instance()
         print "Delete " + cache.cache_dir
-        shutil.rmtree(cache.cache_dir, ignore_errors=True)
\ No newline at end of file
+        shutil.rmtree(cache.cache_dir, ignore_errors=True)
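
The invoke_with_test() hunk above now normalises --analyzer-assigned-uuid
through the standard uuid module before use. A small standalone sketch of
that validation step, with a made-up sample value (the helper name is not
part of lava-test):

    import uuid

    def normalise_uuid(value):
        # Raises ValueError for malformed input, which the command turns
        # into a parser error ("--analyzer-assigned-uuid: ...").
        return str(uuid.UUID(value))

    print(normalise_uuid("01234567-89ab-cdef-0123-456789abcdef"))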

=== modified file 'lava_test/core/config.py'
--- lava_test/core/config.py	2012-03-05 21:43:48 +0000
+++ lava_test/core/config.py	2012-03-08 18:04:16 +0000
@@ -26,7 +26,8 @@ 
         basedata = os.environ.get('XDG_DATA_HOME',
                      os.path.join(home, '.local', 'share'))
         self.configdir = os.path.join(baseconfig, 'lava_test')
-        self.installdir = os.path.join(basedata, 'lava_test', 'installed-tests')
+        self.installdir = os.path.join(
+            basedata, 'lava_test', 'installed-tests')
         self.resultsdir = os.path.join(basedata, 'lava_test', 'results')
         self.registry = self._load_registry()
 
@@ -54,11 +55,11 @@ 
                 "entry_point": "lava_test.core.providers:BuiltInProvider"
             }, {
                 "entry_point": "lava_test.core.providers:PkgResourcesProvider",
-                "config": {"namespace": "lava_test.test_definitions" }
+                "config": {"namespace": "lava_test.test_definitions"}
             },
             {
                 "entry_point": "lava_test.core.providers:RegistryProvider",
-                "config": {"entries": [] }
+                "config": {"entries": []}
             }]
         }
 

=== modified file 'lava_test/core/installers.py'
--- lava_test/core/installers.py	2012-02-27 03:15:13 +0000
+++ lava_test/core/installers.py	2012-03-08 18:05:05 +0000
@@ -53,18 +53,22 @@ 
             self.steps, self.deps, self.url, self.md5)
 
     def _run_shell_cmd(self, cmd, observer):
-        if observer: observer.about_to_run_shell_command(cmd)
+        if observer:
+            observer.about_to_run_shell_command(cmd)
         extcmd = ExternalCommandWithDelegate(observer)
         returncode = extcmd.check_call(cmd, shell=True)
-        if observer: observer.did_run_shell_command(cmd, returncode)
+        if observer:
+            observer.did_run_shell_command(cmd, returncode)
 
     def _installdeps(self, observer):
         if self.deps:
-            if observer: observer.about_to_install_packages(self.deps)
+            if observer:
+                observer.about_to_install_packages(self.deps)
             # XXX: Possible point of target-specific package installation
             cmd = "sudo apt-get install -y --force-yes " + " ".join(self.deps)
             self._run_shell_cmd(cmd, observer)
-            if observer: observer.did_install_packages(self.deps)
+            if observer:
+                observer.did_install_packages(self.deps)
 
     def _download(self, observer):
         """
@@ -74,14 +78,16 @@ 
         """
         if not self.url:
             return
-        if observer: observer.about_to_download_file(self.url)
+        if observer:
+            observer.about_to_download_file(self.url)
         filename = geturl(self.url)
         # If the file does not exist, then the download was not
         # successful
         if not os.path.exists(filename):
             raise RuntimeError(
                 "Failed to download %r" % self.url)
-        if observer: observer.did_download_file(self.url)
+        if observer:
+            observer.did_download_file(self.url)
         if self.md5:
             checkmd5 = hashlib.md5()
             with open(filename, 'rb') as fd:

=== modified file 'lava_test/core/loader.py'
--- lava_test/core/loader.py	2012-01-17 12:42:46 +0000
+++ lava_test/core/loader.py	2012-03-08 18:08:15 +0000
@@ -18,6 +18,7 @@ 
 
 import logging
 
+
 class TestLoader(object):
     """
     Test loader.
@@ -53,11 +54,11 @@ 
                 raise RuntimeError(
                     "lava-test is not properly set up."
                     " Please read the README file")
-            except ImportError, err:
-                logging.warning("Couldn't load module : %s . Maybe configuration needs to be updated" % module_name)
-                logging.warning("The configuration is stored at %s" %(get_config().configdir))
-
-
+            except ImportError:
+                logging.warning("Couldn't load module: %s", module_name)
+                logging.warning(
+                    "The configuration may need updating, it is stored in %r",
+                    get_config().configdir)
 
     def __getitem__(self, test_id):
         """

=== modified file 'lava_test/core/providers.py'
--- lava_test/core/providers.py	2012-02-24 18:39:28 +0000
+++ lava_test/core/providers.py	2012-03-08 18:15:41 +0000
@@ -12,8 +12,6 @@ 
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-import sys
-from urllib2 import URLError
 
 from lava_test.api.core import ITestProvider
 from lava_test.core.config import get_config
@@ -75,8 +73,8 @@ 
     """
     Test provider that provides tests declared in pkg_resources working_set
 
-    By default it looks at the 'lava_test.test_definitions' name space but it can
-    be changed with custom 'namespace' configuration entry.
+    By default it looks at the 'lava_test.test_definitions' namespace but it
+    can be changed with a custom 'namespace' configuration entry.
     """
 
     def __init__(self, config):
@@ -139,22 +137,12 @@ 
 
     @classmethod
     def unregister_remote_test(self, test_url):
-        config = get_config()  # This is a different config object from
-                             # self._config
-        provider_config = config.get_provider_config("lava_test.core.providers:RegistryProvider")
-
-        if "entries" not in provider_config:
-            provider_config["entries"] = []
-
-        logging.info("removing remote test : " + test_url)
-        for entry in provider_config["entries"]:
-          logging.debug("found entry = " + str(entry))
-
-        if test_url in provider_config["entries"]:
-            provider_config["found remote test : "].remove(test_url)
-            config._save_registry()
-        else:
-            raise ValueError("This test is not registered")
+        # This is a different config object from self._config
+        config = get_config()
+        provider_config = config.get_provider_config(
+            "lava_test.core.providers:RegistryProvider")
+        provider_config.get("entries", []).remove(test_url)
+        config._save_registry()
 
     def _load_remote_test(self, test_url):
         """
@@ -175,11 +163,13 @@ 
             try:
                 test = self._load_remote_test(test_url)
                 if test.test_id in self._cache:
-                    raise ValueError("Duplicate test %s declared" % test.test_id)
+                    raise ValueError(
+                        "Duplicate test %s declared" % test.test_id)
                 self._cache[test.test_id] = test
             except IOError as exc:
                 logging.warning(
-                    "Unable to load test definition from %r: %r", test_url, exc)
+                    "Unable to load test definition from %r: %r",
+                    test_url, exc)
                 if hasattr(exc, 'reason'):
                     logging.warning("Error reason: %r", exc.reason)
                 elif hasattr(exc, 'code'):
@@ -187,8 +177,9 @@ 
             except Exception as exc:
                 # This can be a number of things, including URL errors, JSON
                 # errors and validation errors
-                logging.warning('Unable to load test definition from %r: %r', 
-                                test_url, exc)
+                logging.warning(
+                    'Unable to load test definition from %r: %r',
+                    test_url, exc)
 
     def __iter__(self):
         self._fill_cache()
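
unregister_remote_test() is reduced above to a plain list removal; the
ValueError raised for an unknown URL is what the reworked unregister-test
command reports as "This test is not registered". A standalone sketch with a
made-up registry dictionary:

    # Stand-in for the RegistryProvider configuration.
    provider_config = {"entries": ["http://example.org/tests/foo.json"]}

    def unregister(test_url):
        # list.remove() raises ValueError when the URL was never registered.
        provider_config.get("entries", []).remove(test_url)

    unregister("http://example.org/tests/foo.json")
    try:
        unregister("http://example.org/tests/bar.json")
    except ValueError:
        print("This test is not registered")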

=== modified file 'lava_test/core/runners.py'
--- lava_test/core/runners.py	2011-10-20 17:53:31 +0000
+++ lava_test/core/runners.py	2012-03-08 18:27:42 +0000
@@ -50,22 +50,27 @@ 
                 # should override the test options in the step ?
                 if cmd.find("$(OPTIONS)") > 0:
                     # check if test options is provided or use default options
-                    if not test_options and self.default_options == None:
-                        raise RuntimeError("Test options is missing. No default value is provided.")
+                    if not test_options and self.default_options is None:
+                        raise RuntimeError((
+                            "Test options is missing."
+                            " No default value was provided."))
                     if not test_options:
                         test_options = self.default_options
-                    cmd = cmd.replace("$(OPTIONS)",test_options)
+                    cmd = cmd.replace("$(OPTIONS)", test_options)
 
-                if observer: observer.about_to_run_shell_command(cmd)
+                if observer:
+                    observer.about_to_run_shell_command(cmd)
                 returncode = extcmd.call(cmd, shell=True)
-                if observer: observer.did_run_shell_command(cmd, returncode)
-                if (returncode != 0): run_failed = True
+                if observer:
+                    observer.did_run_shell_command(cmd, returncode)
+                if returncode != 0:
+                    run_failed = True
         finally:
             stdout.close()
             stderr.close()
         return run_failed
 
-    def run(self, artifacts, observer=None, test_options = None):
+    def run(self, artifacts, observer=None, test_options=None):
         """
         Run the test program by executing steps in sequence.
 
@@ -74,6 +79,7 @@ 
             :meth:`~lava_test.api.delegates.TestRunner.run`
         """
         self.starttime = datetime.datetime.utcnow()
-        run_failed = self._run_lava_test_steps(artifacts, observer, test_options)
+        run_failed = self._run_lava_test_steps(
+            artifacts, observer, test_options)
         self.endtime = datetime.datetime.utcnow()
         return run_failed
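
The step runner above substitutes $(OPTIONS) with the user-supplied options,
falling back to the default options and failing when neither is available. A
minimal sketch of just that substitution (it uses `in` rather than str.find()
for clarity; the example command comes from the peacekeeper definition):

    def expand_options(cmd, test_options, default_options):
        if "$(OPTIONS)" in cmd:
            if not test_options and default_options is None:
                raise RuntimeError(
                    "Test options are missing."
                    " No default value was provided.")
            if not test_options:
                test_options = default_options
            cmd = cmd.replace("$(OPTIONS)", test_options)
        return cmd

    print(expand_options(
        "python peacekeeper_runner.py $(OPTIONS)", None, "firefox"))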

=== modified file 'lava_test/core/swprofile.py'
--- lava_test/core/swprofile.py	2012-03-06 17:49:19 +0000
+++ lava_test/core/swprofile.py	2012-03-08 18:28:18 +0000
@@ -64,7 +64,7 @@ 
         name = buildstamp.splitlines()[1]
     except IOError:
         import lsb_release
-        
+
         if lsb_information == None:
             lsb_information = lsb_release.get_distro_information()
         name = lsb_information['DESCRIPTION']

=== modified file 'lava_test/core/tests.py'
--- lava_test/core/tests.py	2012-03-08 17:03:20 +0000
+++ lava_test/core/tests.py	2012-03-08 18:28:27 +0000
@@ -51,8 +51,8 @@ 
         ITestParser instance to use
     """
 
-    def __init__(self, test_id, test_version=None,
-                 installer=None, runner=None, parser=None, default_options=None):
+    def __init__(self, test_id, test_version=None, installer=None, runner=None,
+                 parser=None, default_options=None):
         self._test_id = test_id
         self._test_version = test_version
         # Delegate objects
@@ -60,7 +60,7 @@ 
         self.runner = runner
         self.parser = parser
         self.default_options = default_options
-        
+
         # Config instance
         self._config = get_config()
 

=== modified file 'lava_test/main.py'
--- lava_test/main.py	2012-03-05 21:43:48 +0000
+++ lava_test/main.py	2012-03-08 17:59:01 +0000
@@ -16,8 +16,6 @@ 
 import logging
 import logging.config
 
-from  lava_test.core.config import get_config
-
 from lava_tool.dispatcher import LavaDispatcher, run_with_dispatcher_class
 
 
@@ -34,19 +32,14 @@ 
 
 
 def main():
-    # default logging level is warning. -v or --verbose will change it to debug (in Command class).
+    # default logging level is warning. -v or --verbose will change it to debug
+    # (in Command class).
     FORMAT = '<LAVA_TEST>%(asctime)s %(levelname)s: %(message)s'
-    DATEFMT= '%Y-%m-%d %I:%M:%S %p'
-    logging.basicConfig(format=FORMAT,datefmt=DATEFMT)
+    DATEFMT = '%Y-%m-%d %I:%M:%S %p'
+    logging.basicConfig(format=FORMAT, datefmt=DATEFMT)
     logging.root.setLevel(logging.WARNING)
-
-    run_with_dispatcher_class(LAVATestDispatcher)
+    return run_with_dispatcher_class(LAVATestDispatcher)
 
 
 if __name__ == '__main__':
-    import os
-    import sys
-    arg_only = sys.argv
-    arg_only.remove(arg_only[0])
-    code = LAVATestDispatcher().dispatch(arg_only)
-    if (code != 0): exit(code)
\ No newline at end of file
+    raise SystemExit(main())

=== modified file 'lava_test/test_definitions/bluetooth-enablement.py'
--- lava_test/test_definitions/bluetooth-enablement.py	2012-02-07 23:13:34 +0000
+++ lava_test/test_definitions/bluetooth-enablement.py	2012-03-08 19:22:08 +0000
@@ -31,17 +31,23 @@ 
 from lava_test.core.tests import Test
 
 DEFAULT_OPTIONS = ""
-INSTALLSTEPS = ["bzr branch lp:~linaro-foundations/linaro-ubuntu/lava-test-bt-enablement bluetooth-enablement"]
+INSTALLSTEPS = [(
+    "bzr branch lp:~linaro-foundations/linaro-ubuntu/lava-test-bt-enablement"
+    " bluetooth-enablement")]
 DEPS = ["bzr", "bluez"]
 RUNSTEPS = ["cd bluetooth-enablement; sudo bash -x ./run-test.sh"]
 PATTERN = "(?P<test_case_id>[a-zA-Z0-9_-]+):\s(?P<result>\w+)"
 FIXUPS = {
-        "PASS": "pass",
-        "FAIL": "fail"
-        }
-
-testinst = TestInstaller(INSTALLSTEPS, deps=DEPS)
-testrun = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-testparser = TestParser(PATTERN, fixupdict = FIXUPS)
-testobj = Test(test_id="bluetooth-enablement", installer=testinst,
-                          runner=testrun, parser=testparser)
+    "PASS": "pass",
+    "FAIL": "fail"
+}
+
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, fixupdict=FIXUPS)
+
+testobj = Test(
+    test_id="bluetooth-enablement",
+    installer=installer,
+    runner=runner,
+    parser=parser)
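
The test definitions below follow the same installer/runner/parser/testobj
layout, with a PATTERN regular expression and a FIXUPS dictionary driving
result parsing. A standalone sketch of what the pattern above extracts from a
single (made-up) line of run-test.sh output:

    import re

    PATTERN = "(?P<test_case_id>[a-zA-Z0-9_-]+):\s(?P<result>\w+)"
    FIXUPS = {"PASS": "pass", "FAIL": "fail"}

    line = "bluetooth-scan: PASS"  # made-up output line
    result = re.search(PATTERN, line).groupdict()
    result["result"] = FIXUPS[result["result"]]
    print(result)  # test_case_id: bluetooth-scan, result: pass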

=== modified file 'lava_test/test_definitions/bootchart.py'
--- lava_test/test_definitions/bootchart.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/bootchart.py	2012-03-08 19:22:09 +0000
@@ -14,7 +14,7 @@ 
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
-Bootchart is a benchmark for measuring the time it takes to boot the system 
+Bootchart is a benchmark for measuring the time it takes to boot the system
 to a point where it hands over control to the user.
 
 **URL:** http://bootchart.org
@@ -34,9 +34,12 @@ 
 RUNSTEPS = ['./bootchartscript/bootchartscript.sh $(OPTIONS)']
 PATTERN = "^(?P<test_case_id>\w+):\W+(?P<measurement>\d+\.\d+)"
 
-bootchartinst = TestInstaller(INSTALLSTEPS, deps=DEPS)
-bootchartrun = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-bootchartparser = TestParser(PATTERN,
-                   appendall={'units':'sec', 'result':'pass'})
-testobj = Test(test_id="bootchart", installer=bootchartinst,
-                                  runner=bootchartrun, parser=bootchartparser)
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'sec', 'result': 'pass'})
+
+testobj = Test(
+    test_id="bootchart",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/firefox.py'
--- lava_test/test_definitions/firefox.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/firefox.py	2012-03-08 19:22:11 +0000
@@ -29,12 +29,17 @@ 
 DEFAULT_OPTIONS = ""
 INSTALLSTEPS = ['git clone git://github.com/janimo/firefox-startup-timing.git']
 DEPS = ['firefox', 'git-core', 'gcalctool']
-RUNSTEPS = ['cd firefox-startup-timing; ./firefox_startup_timing.sh $(OPTIONS)']
+RUNSTEPS = [(
+    'cd firefox-startup-timing;'
+    ' ./firefox_startup_timing.sh $(OPTIONS)')]
 PATTERN = "^(?P<test_case_id>\w+):(?P<measurement>\d+)"
 
-firefoxinst = TestInstaller(INSTALLSTEPS, deps=DEPS)
-firefoxrun = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-firefoxparser = TestParser(PATTERN,
-               appendall={'units':'ms', 'result':'pass'})
-testobj = Test(test_id="firefox", installer=firefoxinst,
-                                  runner=firefoxrun, parser=firefoxparser)
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'ms', 'result': 'pass'})
+
+testobj = Test(
+    test_id="firefox",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/glmemperf.py'
--- lava_test/test_definitions/glmemperf.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/glmemperf.py	2012-03-08 19:22:12 +0000
@@ -35,11 +35,12 @@ 
 RUNSTEPS = ["glmemperf $(OPTIONS)"]
 PATTERN = "^(?P<test_case_id>\w+):\W+(?P<measurement>\d+) fps"
 
-inst = TestInstaller(deps=["glmemperf"])
-run = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-parse = TestParser(PATTERN,
-                                      appendall={'units':'fps',
-                                                 'result':'pass'})
+installer = TestInstaller(deps=["glmemperf"])
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'fps', 'result': 'pass'})
 
-testobj = Test(test_id="glmemperf", installer=inst,
-                                  runner=run, parser=parse)
+testobj = Test(
+    test_id="glmemperf",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/gmpbench.py'
--- lava_test/test_definitions/gmpbench.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/gmpbench.py	2012-03-08 19:22:15 +0000
@@ -32,24 +32,28 @@ 
 from lava_test.core.tests import Test
 
 
-VERSION='0.2'
-URL="ftp://ftp.gmplib.org/pub/misc/gmpbench-%s.tar.bz2" %(VERSION)
-URL_gexpr="http://www.gmplib.org/gexpr.c"
+VERSION = '0.2'
+URL = "ftp://ftp.gmplib.org/pub/misc/gmpbench-%s.tar.bz2" % VERSION
+URL_gexpr = "http://www.gmplib.org/gexpr.c"
 DEPS = ['gcc', 'libgmp3-dev', 'wget', 'bzip2']
 
 DEFAULT_OPTIONS = ""
-INSTALLSTEPS = ['tar -xjf  gmpbench-0.2.tar.bz2',
-                'wget -c %s' %(URL_gexpr),
+INSTALLSTEPS = ['tar -xjf gmpbench-0.2.tar.bz2',
+                'wget -c %s' % URL_gexpr,
                 'mv gexpr.c gmpbench-0.2',
                 'cd gmpbench-0.2 && gcc -o gexpr gexpr.c  -static -lm']
 RUNSTEPS = ['cd  gmpbench-0.2 && PATH=$PATH:. ./runbench $(OPTIONS)']
-PATTERN = "\s*(?P<test_case_id>GMPbench\.*\w*\.*\w*):?\s*"\
-          "(?P<measurement>\d+.\d+)"
-
-gmpbenchinst = TestInstaller(INSTALLSTEPS, deps=DEPS,
-                                                url=URL)
-gmpbenchrun = TestRunner(RUNSTEPS,default_options=DEFAULT_OPTIONS)
-gmpbenchparser = TestParser(PATTERN,
-    appendall={'units':'operations/s', 'result':'pass'})
-testobj = Test(test_id="gmpbench", installer=gmpbenchinst,
-    runner=gmpbenchrun, parser=gmpbenchparser)
+PATTERN = (
+    "\s*(?P<test_case_id>GMPbench\.*\w*\.*\w*):?\s*"
+    "(?P<measurement>\d+.\d+)")
+
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS, url=URL)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={
+    'units': 'operations/s', 'result': 'pass'})
+
+testobj = Test(
+    test_id="gmpbench",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/gtkperf.py'
--- lava_test/test_definitions/gtkperf.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/gtkperf.py	2012-03-08 19:22:16 +0000
@@ -14,7 +14,8 @@ 
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
-GtkPerf is an application designed to test GTK+ performance using predefined GTK+ widgets.
+GtkPerf is an application designed to test GTK+ performance using predefined
+GTK+ widgets.
 
 **URL:** http://gtkperf.sourceforge.net/
 
@@ -32,21 +33,29 @@ 
 # Run tests automatically, 500 repetitions each
 DEFAULT_OPTIONS = "-a -c 500"
 
-RUNSTEPS = ["LANG=C gtkperf  $(OPTIONS)"]
+RUNSTEPS = ["LANG=C gtkperf $(OPTIONS)"]
+
 
 class GtkTestParser(TestParser):
+
     def parse(self, artifacts):
-        PAT1 = "^(?P<test_case_id>\w+) - (?P<subtest>\w*\W*\w*) - time:\W+(?P<measurement>\d+\.\d+)"
-        PAT2 = "^(?P<test_case_id>\w+) - time:\W+(?P<measurement>\d+\.\d+)"
+        PAT1 = (
+            "^(?P<test_case_id>\w+)"
+            " - (?P<subtest>\w*\W*\w*)"
+            " - time:\W+(?P<measurement>\d+\.\d+)")
+        PAT2 = (
+            "^(?P<test_case_id>\w+)"
+            " - time:\W+(?P<measurement>\d+\.\d+)")
         filename = "testoutput.log"
         pat1 = re.compile(PAT1)
         pat2 = re.compile(PAT2)
-        with open(filename) as fd:
-            for lineno, line in enumerate(fd, 1):
+        with open(filename) as stream:
+            for lineno, line in enumerate(stream, 1):
                 match = pat1.search(line)
                 if match:
                     d = match.groupdict()
-                    d['test_case_id'] = "%s.%s" % (d['test_case_id'],
+                    d['test_case_id'] = "%s.%s" % (
+                        d['test_case_id'],
                         d['subtest'])
                     d.pop('subtest')
                     d['test_case_id'] = d['test_case_id'].replace(" ", "_")
@@ -64,9 +73,12 @@ 
                             self.analyze_test_result(d))
 
 
-parse = GtkTestParser(appendall={'units':'seconds', 'result':'pass'})
-inst = TestInstaller(deps=["gtkperf"])
-run = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+installer = TestInstaller(deps=["gtkperf"])
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = GtkTestParser(appendall={'units': 'seconds', 'result': 'pass'})
 
-testobj = Test(test_id="gtkperf", installer=inst,
-                                  runner=run, parser=parse)
+testobj = Test(
+    test_id="gtkperf",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/insanity.py'
--- lava_test/test_definitions/insanity.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/insanity.py	2012-03-08 19:22:18 +0000
@@ -23,8 +23,6 @@ 
 """
 
 import os
-import re
-import subprocess
 import simplejson
 import gzip
 import base64
@@ -45,19 +43,22 @@ 
 
 # Re-running failing tests is expensive, as it produces very large log files.
 # Only remove --no-reruns if you have a small number of failing tests.
-RUNSTEPS = ["rm -f testrun.db",
-        "gst-media-test --no-reruns -t playbin-test --settings %s" % SETTINGS,
-        "gst-media-test --no-reruns -t full-gnlfilesource-scenario " \
-            "--settings %s" % SETTINGS,
-        "gst-media-test --no-reruns -t simple-encoder-scenario",
-        "echo ////////////////////",
-        "insanity-dumpresults-json testrun.db --all",
-    ]
+RUNSTEPS = [
+    "rm -f testrun.db",
+    "gst-media-test --no-reruns -t playbin-test --settings %s" % SETTINGS,
+    "gst-media-test --no-reruns -t full-gnlfilesource-scenario " \
+    "--settings %s" % SETTINGS,
+    "gst-media-test --no-reruns -t simple-encoder-scenario",
+    "echo ////////////////////",
+    "insanity-dumpresults-json testrun.db --all",
+]
+
 
 class InsanityParser(TestParser):
+
     def parse(self, artifacts):
         filename = "testoutput.log"
-        with open(filename, 'r') as stream:
+        with open(filename) as stream:
             while not stream.readline().startswith("//////////"):
                 pass
             results = simplejson.load(stream)
@@ -68,14 +69,13 @@ 
         self.fixlengths()
         self.attach_logfiles()
 
-
     def fixlengths(self):
         for t in self.results['test_results']:
-            if t.has_key("test_case_id"):
+            if "test_case_id" in t:
                 if len(t["test_case_id"]) > MAX_TEST_CASE_ID_LEN:
                     t["test_case_id"] = \
                             t["test_case_id"][-MAX_TEST_CASE_ID_LEN:]
-            if t.has_key("attributes"):
+            if "attributes" in t:
                 attributes = t["attributes"]
                 for k, v in attributes.items():
                     if len(k) > MAX_ATTR_KEY_LEN:
@@ -87,8 +87,8 @@ 
                         # end tends to be more useful than the start.
                         attributes[k] = v[-MAX_ATTR_VAL_LEN:]
 
-
     def attach_logfiles(self):
+        # FIXME: this should use artifacts.attach_file()
         attachments = []
         mime_type = "text/plain"
         total_attachment_size = 0
@@ -130,20 +130,28 @@ 
         This is really only used for qualitative tests
         """
         for t in self.results['test_results']:
-            if t.has_key("result"):
+            if "result" in t:
                 t['result'] = fixupdict[t['result']]
 
-inst = TestInstaller(deps=["insanity-tools",
+
+installer = TestInstaller(
+    deps=[
+        "insanity-tools",
         "samplemedia-minimal",
-        "gstreamer0.10-plugins-base", # videotestsrc et al
-        "gstreamer0.10-plugins-good", # matroskademux et al
-        "gstreamer0.10-plugins-bad", #
-        "gstreamer0.10-plugins-ugly", # asfdemux et al
-        "gstreamer0.10-ffmpeg", # ffdec_h264 et al
-        "gstreamer0.10-gnonlin", # gnlfilesource
-        "gdb", # debugging backtraces
-        ])
-run = TestRunner(RUNSTEPS)
-parse = InsanityParser("")
+        "gstreamer0.10-plugins-base",  # videotestsrc et al
+        "gstreamer0.10-plugins-good",  # matroskademux et al
+        "gstreamer0.10-plugins-bad",
+        "gstreamer0.10-plugins-ugly",  # asfdemux et al
+        "gstreamer0.10-ffmpeg",  # ffdec_h264 et al
+        "gstreamer0.10-gnonlin",  # gnlfilesource
+        "gdb",  # debugging backtraces
+    ]
+)
+runner = TestRunner(RUNSTEPS)
+parser = InsanityParser()
 
-testobj = Test(test_id="insanity", installer=inst, runner=run, parser=parse)
+testobj = Test(
+    test_id="insanity",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/leb-basic-graphics.py'
--- lava_test/test_definitions/leb-basic-graphics.py	2012-02-07 23:14:44 +0000
+++ lava_test/test_definitions/leb-basic-graphics.py	2012-03-08 19:22:19 +0000
@@ -30,17 +30,24 @@ 
 from lava_test.core.tests import Test
 
 DEFAULT_OPTIONS = ""
-INSTALLSTEPS = ["bzr branch lp:~linaro-foundations/linaro-ubuntu/lava-test-basic-graphics leb-basic-graphics"]
+INSTALLSTEPS = [
+    ("bzr branch"
+     " lp:~linaro-foundations/linaro-ubuntu/lava-test-basic-graphics"
+     " leb-basic-graphics")]
 DEPS = ["bzr", "mesa-utils-extra", "ubuntu-desktop"]
 RUNSTEPS = ["cd leb-basic-graphics; sudo bash -x ./run-test.sh"]
 PATTERN = "(?P<test_case_id>[a-zA-Z0-9_-]+):\s(?P<result>\w+)"
 FIXUPS = {
-        "PASS": "pass",
-        "FAIL": "fail"
-        }
-
-testinst = TestInstaller(INSTALLSTEPS, deps=DEPS)
-testrun = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-testparser = TestParser(PATTERN, fixupdict = FIXUPS)
-testobj = Test(test_id="leb-basic-graphics", installer=testinst,
-                          runner=testrun, parser=testparser)
+    "PASS": "pass",
+    "FAIL": "fail"
+}
+
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, fixupdict=FIXUPS)
+
+testobj = Test(
+    test_id="leb-basic-graphics",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/ltp.py'
--- lava_test/test_definitions/ltp.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/ltp.py	2012-03-08 19:22:21 +0000
@@ -1,4 +1,4 @@ 
-# Copyright (c) 2010, 2011 Linaro
+# Copyright (c) 2010, 2011 Linaro
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@ 
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
-The Linux Test Project is a collection of tools for testing Linux with a 
-focus on the kernel.
+The Linux Test Project is a collection of tools for testing Linux with a focus
+on the kernel.
 
 **URL:** http://ltp.sourceforge.net
 
@@ -31,9 +31,10 @@ 
 from lava_test.core.tests import Test
 
 
-VERSION="20100831"
-URL='http://downloads.sourceforge.net/project/ltp/LTP Source/ltp-%s/ltp-full-%s.bz2' % (VERSION, VERSION)
-MD5="6982c72429a62f3917c13b2d529ad1ce"
+VERSION = "20100831"
+URL = ('http://downloads.sourceforge.net/project/ltp/'
+       'LTP Source/ltp-%s/ltp-full-%s.bz2') % (VERSION, VERSION)
+MD5 = "6982c72429a62f3917c13b2d529ad1ce"
 DEPS = ['bzip2', 'flex', 'bison', 'make', 'build-essential']
 
 SCRIPT = """
@@ -50,19 +51,24 @@ 
                 'chmod +x installltp.sh',
                 './installltp.sh']
 RUNSTEPS = ['cd build && sudo ./runltp $(OPTIONS)']
-PATTERN = "^(?P<test_case_id>\S+)    (?P<subid>\d+)  (?P<result>\w+)  :  (?P<message>.+)"
-FIXUPS = {"TBROK":"fail",
-          "TCONF":"skip",
-          "TFAIL":"fail",
-          "TINFO":"unknown",
-          "TPASS":"pass",
-          "TWARN":"unknown"}
+PATTERN = (
+    "^(?P<test_case_id>\S+)"
+    "    (?P<subid>\d+)"
+    "  (?P<result>\w+)"
+    "  :  (?P<message>.+)")
+FIXUPS = {
+    "TBROK": "fail",
+    "TCONF": "skip",
+    "TFAIL": "fail",
+    "TINFO": "unknown",
+    "TPASS": "pass",
+    "TWARN": "unknown"}
 
 
 class LTPParser(TestParser):
+
     def parse(self, artifacts):
         filename = artifacts.stdout_pathname
-        print "filename=%s" %filename
         pat = re.compile(self.pattern)
         with open(filename, 'r') as fd:
             for line in fd.readlines():
@@ -79,10 +85,13 @@ 
                         self.analyze_test_result(results))
 
 
-ltpinst = TestInstaller(INSTALLSTEPS, deps=DEPS, url=URL,
-                                           md5=MD5)
-ltprun = TestRunner(RUNSTEPS,default_options = DEFAULT_OPTIONS)
-ltpparser = LTPParser(PATTERN, fixupdict = FIXUPS)
-testobj = Test(test_id="ltp", test_version=VERSION,
-                                  installer=ltpinst, runner=ltprun,
-                                  parser=ltpparser)
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS, url=URL, md5=MD5)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = LTPParser(PATTERN, fixupdict=FIXUPS)
+
+testobj = Test(
+    test_id="ltp",
+    test_version=VERSION,
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/lttng.py'
--- lava_test/test_definitions/lttng.py	2012-02-02 06:28:35 +0000
+++ lava_test/test_definitions/lttng.py	2012-03-08 19:22:23 +0000
@@ -28,14 +28,19 @@ 
 
 DEFAULT_OPTIONS = ""
 
-INSTALLSTEPS = ["apt-get build-dep lttng-tools --yes",
-                "bzr branch lp:~linaro-foundations/linaro-ubuntu/lava-test-lttng"]
+INSTALLSTEPS = [
+    "apt-get build-dep lttng-tools --yes",
+    "bzr branch lp:~linaro-foundations/linaro-ubuntu/lava-test-lttng"]
 DEPS = ["bzr", "linux-headers-$(uname -r)", "lttng-modules-dkms"]
 RUNSTEPS = ["cd lava-test-lttng; sudo bash -x ./run-test.sh"]
 PATTERN = "^(?P<test_case_id>[\w:()]+)\s+\-\s+(?P<result>\w+$)"
 
-lttnginst = TestInstaller(INSTALLSTEPS, deps=DEPS)
-lttngrun = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-lttngparser = TestParser(PATTERN)
-testobj = Test(test_id="lttng", installer=lttnginst,
-                                  runner=lttngrun, parser=lttngparser)
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN)
+
+testobj = Test(
+    test_id="lttng",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/peacekeeper.py'
--- lava_test/test_definitions/peacekeeper.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/peacekeeper.py	2012-03-08 19:22:25 +0000
@@ -33,17 +33,18 @@ 
 curdir = os.path.realpath(os.path.dirname(__file__))
 
 DEFAULT_OPTIONS = "firefox"
-INSTALLSTEPS = ['cp -rf %s/peacekeeper/* .'%curdir]
+INSTALLSTEPS = ['cp -rf %s/peacekeeper/* .' % curdir]
 RUNSTEPS = ['python peacekeeper_runner.py $(OPTIONS)']
-DEPS = ['python-ldtp','firefox']
-
-my_installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
-my_runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+DEPS = ['python-ldtp', 'firefox']
 
 PATTERN = "^(?P<result>\w+): Score = (?P<measurement>\d+)"
 
-my_parser = TestParser(PATTERN,
-                                          appendall={'units':'point'})
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'point'})
 
-testobj = Test(test_id="peacekeeper", installer=my_installer,
-                                  runner=my_runner, parser=my_parser)
+testobj = Test(
+    test_id="peacekeeper",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/peacekeeper/peacekeeper_runner.py'
--- lava_test/test_definitions/peacekeeper/peacekeeper_runner.py	2011-10-19 07:29:15 +0000
+++ lava_test/test_definitions/peacekeeper/peacekeeper_runner.py	2012-03-11 06:26:42 +0000
@@ -3,25 +3,25 @@ 
 import re
 import sys
 import time
-from optparse import OptionParser
+# FIXME: convert to explicit import
 from ldtp import *
 from urllib import urlopen
 
 chromium_data = {
-    "cmd":"chromium-browser",
-    "title":"*Chromium",
-    "urlfield":"txt0"
+    "cmd": "chromium-browser",
+    "title": "*Chromium",
+    "urlfield": "txt0"
 }
 
 firefox_data = {
-    "cmd":"firefox",
-    "title":"*Firefox",
-    "urlfield":"txtGotoaWebSite"
+    "cmd": "firefox",
+    "title": "*Firefox",
+    "urlfield": "txtGotoaWebSite"
 }
 
 browser_data = {
-    "firefox":firefox_data,
-    "chromium":chromium_data
+    "firefox": firefox_data,
+    "chromium": chromium_data
 }
 
 site = "http://service.futuremark.com/peacekeeper/run.action"
@@ -36,29 +36,29 @@ 
 
 launchapp(browser["cmd"], [site])
 
-if not waittillguiexist(browser["title"], guiTimeOut = 60):
+if not waittillguiexist(browser["title"], guiTimeOut=60):
     print "Error: Program never started"
     sys.exit(-1)
 
 result_url = gettextvalue(browser["title"], browser["urlfield"])
-wait_loop = 60 # 60 * 30 seconds = 15 minutes
+wait_loop = 60  # 60 * 30 seconds = 15 minutes
 time.sleep(10)
 
 while not re.search('results.action', result_url) and wait_loop > 0:
     result_url = gettextvalue(browser["title"], browser["urlfield"])
-    print "waiting %d ..."%wait_loop
+    print "waiting %d ..." % wait_loop
     time.sleep(30)
-    wait_loop = wait_loop-1
+    wait_loop = wait_loop - 1
 
 closewindow(browser["title"])
 
-print "result_url = %s" %result_url
+print "result_url = %s" % result_url
 
 # if the url not start with http ? append http to it.
 
 if result_url.find("http") != 0:
     result_url = "http://" + result_url
-    
+
 if wait_loop > 0:
     fd = urlopen(result_url)
     data = fd.read()

=== modified file 'lava_test/test_definitions/perf.py'
--- lava_test/test_definitions/perf.py	2012-01-17 06:37:14 +0000
+++ lava_test/test_definitions/perf.py	2012-03-08 19:22:26 +0000
@@ -20,12 +20,22 @@ 
 
 DEFAULT_OPTIONS = ""
 DEPS = ["linux-tools"]
-RUNSTEPS = ["export PERFBIN=`find /usr/bin/ -name perf_* | sort -r | head -n 1 | xargs basename`; $PERFBIN test 2>&1  $(OPTIONS)"]
+
+RUNSTEPS = [
+    ("export PERFBIN=`find /usr/bin/ -name perf_*"
+     " | sort -r"
+     " | head -n 1"
+     " | xargs basename`;"
+     " $PERFBIN test 2>&1 $(OPTIONS)")]
+
 PATTERN = "^ \d+:\s+(?P<test_case_id>[\w\s]+):\W+(?P<message>\w+)"
 
-perfinst = TestInstaller(deps=DEPS)
-perfrun = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-perfparser = TestParser(PATTERN,
-                   appendall={"result":"pass"})
-testobj = Test(test_id="perf", installer=perfinst,
-                                  runner=perfrun, parser=perfparser)
+installer = TestInstaller(deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={"result": "pass"})
+
+testobj = Test(
+    test_id="perf",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/posixtestsuite.py'
--- lava_test/test_definitions/posixtestsuite.py	2011-10-20 12:12:15 +0000
+++ lava_test/test_definitions/posixtestsuite.py	2012-03-08 19:22:27 +0000
@@ -30,28 +30,32 @@ 
 from lava_test.core.tests import Test
 
 
-VERSION="20100831"
-URL= "http://downloads.sourceforge.net/project/ltp/LTP Source/ltp-%s/"\
-     "ltp-full-%s.bz2" % (VERSION, VERSION)
-MD5="6982c72429a62f3917c13b2d529ad1ce"
+VERSION = "20100831"
+URL = ("http://downloads.sourceforge.net/project/ltp/LTP Source/ltp-%s/"
+      "ltp-full-%s.bz2") % (VERSION, VERSION)
+MD5 = "6982c72429a62f3917c13b2d529ad1ce"
 DEFAULT_OPTIONS = ""
 INSTALLSTEPS = ['tar -xjf ltp-full-20100831.bz2']
 DEPS = ['gcc', 'bzip2']
-RUNSTEPS = ['cd ltp-full-20100831/testcases/open_posix_testsuite/ && make $(OPTIONS)']
+RUNSTEPS = [
+    ('cd ltp-full-20100831/testcases/open_posix_testsuite/'
+     ' && make $(OPTIONS)')]
 
-PATTERN = "((?P<test_case_id>\A(\w+[/]+)+\w+[-]*\w*[-]*\w*) .*? (?P<result>\w+))"
+PATTERN = ("((?P<test_case_id>\A(\w+[/]+)+\w+[-]*\w*[-]*\w*)"
+           " .*? (?P<result>\w+))")
 FIXUPS = {
-            "FAILED"      :  "fail",
-            "INTERRUPTED" :  "skip",
-            "PASSED"      :  "pass",
-            "UNRESOLVED"  :  "unknown",
-            "UNSUPPORTED" :  "skip",
-            "UNTESTED"    :  "skip",
-            "SKIPPING"    :  "skip"
-         }
+    "FAILED": "fail",
+    "INTERRUPTED": "skip",
+    "PASSED": "pass",
+    "UNRESOLVED": "unknown",
+    "UNSUPPORTED": "skip",
+    "UNTESTED": "skip",
+    "SKIPPING": "skip"
+}
 
 
 class PosixParser(TestParser):
+
     def parse(self, artifacts):
         filename = "testoutput.log"
         pat = re.compile(self.pattern)
@@ -68,10 +72,14 @@ 
                 self.results['test_results'].append(
                     self.analyze_test_result(results))
 
-posix_inst = TestInstaller(INSTALLSTEPS, deps=DEPS,
-    url=URL, md5=MD5)
-posix_run = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-posixparser = PosixParser(PATTERN, fixupdict = FIXUPS)
-testobj = Test(test_id="posixtestsuite", test_version=VERSION,
-                                  installer=posix_inst, runner=posix_run,
-                                  parser=posixparser)
+
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS, url=URL, md5=MD5)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = PosixParser(PATTERN, fixupdict=FIXUPS)
+
+testobj = Test(
+    test_id="posixtestsuite",
+    test_version=VERSION,
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/pwrmgmt.py'
--- lava_test/test_definitions/pwrmgmt.py	2012-01-11 19:49:35 +0000
+++ lava_test/test_definitions/pwrmgmt.py	2012-03-08 19:22:29 +0000
@@ -26,9 +26,6 @@ 
 RUNSTEPS = ['cd pm-qa && make check $(OPTIONS)']
 DEPS = ['git-core', 'make', 'linux-libc-dev', 'util-linux', 'build-essential']
 
-pwrmgmtinst = TestInstaller(INSTALLSTEPS, deps=DEPS)
-pwrmgmtrun = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-
 # test case name is before  ":" , the test log is between ":" and "...",
 # the result is after "..."
 # Each test case is separated with a test description beginning with "#"
@@ -43,11 +40,20 @@ 
 #cpufreq_02.0/cpu1: checking scaling_available_governors exists...        pass
 #cpufreq_02.1/cpu1: checking scaling_governor exists...                   pass
 
-PATTERN = "^(?P<test_case_id>[\w/\.]+):\s+(?P<message>.+)\.\.\.\s+(?P<result>\w+)"
-
-
-pwrmgmtparser = TestParser(PATTERN,
-    appendall={'result':'pass'})
-
-testobj = Test(test_id="pwrmgmt", installer=pwrmgmtinst,
-                                  runner=pwrmgmtrun, parser=pwrmgmtparser)
+PATTERN = (
+    "^(?P<test_case_id>[\w/\.]+):"
+    "\s+"
+    "(?P<message>.+)"
+    "\.\.\.\s+"
+    "(?P<result>\w+)")
+
+
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'result': 'pass'})
+
+testobj = Test(
+    test_id="pwrmgmt",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/pybench.py'
--- lava_test/test_definitions/pybench.py	2011-10-20 12:12:15 +0000
+++ lava_test/test_definitions/pybench.py	2012-03-08 19:22:30 +0000
@@ -26,16 +26,13 @@ 
 from lava_test.core.runners import TestRunner
 from lava_test.core.tests import Test
 
-
-VERSION='r27'
-URL="http://svn.python.org/projects/python/tags/%s/Tools/pybench/" %(VERSION)
+VERSION = 'r27'
+URL = "http://svn.python.org/projects/python/tags/%s/Tools/pybench/" % VERSION
 DEFAULT_OPTIONS = ""
-INSTALLSTEPS = ["svn export %s" %(URL)]
+INSTALLSTEPS = ["svn export %s" % URL]
 RUNSTEPS = ['python pybench/pybench.py $(OPTIONS)']
 DEPS = ['subversion']
 
-my_installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
-my_runner = TestRunner(RUNSTEPS,default_options=DEFAULT_OPTIONS)
 
 # test case name is first column and measurement is average column
 #
@@ -43,9 +40,21 @@ 
 #         BuiltinFunctionCalls:     85ms    151ms    0.30us    0.147ms
 #         BuiltinMethodLookup:      68ms    113ms    0.11us    0.171ms
 
-PATTERN = "^\s+(?P<test_case_id>\w+):\s+(\d+)ms\s+(?P<measurement>\d+)ms"
-
-my_parser = TestParser(PATTERN, appendall={'units':'ms','result':'pass'})
-
-testobj = Test(test_id="pybench", installer=my_installer,
-               runner=my_runner, parser=my_parser)
+PATTERN = (
+    "^\s+"
+    "(?P<test_case_id>\w+)"
+    ":"
+    "\s+(\d+)ms"
+    "\s+"
+    "(?P<measurement>\d+)ms")
+
+
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'ms', 'result': 'pass'})
+
+testobj = Test(
+    test_id="pybench",
+    installer=installer,
+    runner=runner,
+    parser=parser)
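
As the comment above notes, the pybench parser takes the test name from the first column; the rewritten PATTERN then skips the first timing column and captures the second one, which that comment identifies as the average. A short illustration against the sample row quoted in the comment (without the leading "#"); variable names here are illustrative:

    import re

    PATTERN = (
        "^\s+"
        "(?P<test_case_id>\w+)"
        ":"
        "\s+(\d+)ms"
        "\s+"
        "(?P<measurement>\d+)ms")

    line = "         BuiltinFunctionCalls:     85ms    151ms    0.30us    0.147ms"
    match = re.match(PATTERN, line)
    print(match.group("test_case_id"))   # BuiltinFunctionCalls
    print(match.group("measurement"))    # 151, in ms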

=== modified file 'lava_test/test_definitions/smem.py'
--- lava_test/test_definitions/smem.py	2011-10-20 12:12:15 +0000
+++ lava_test/test_definitions/smem.py	2012-03-08 19:22:32 +0000
@@ -23,10 +23,12 @@ 
 PATTERN = "^(?P<test_case_id>(\w+\s)+)\s\s+(?P<measurement>\d+)"
 DEPS = ['smem']
 
+installer = TestInstaller(deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'KB', 'result': 'pass'})
 
-smeminst = TestInstaller(deps=DEPS)
-smemrun = TestRunner(RUNSTEPS,default_options=DEFAULT_OPTIONS)
-smemparser = TestParser(PATTERN,
-               appendall={'units':'KB', 'result':'pass'})
-testobj = Test(test_id="smem", installer=smeminst,
-                                  runner=smemrun, parser=smemparser)
+testobj = Test(
+    test_id="smem",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/stream.py'
--- lava_test/test_definitions/stream.py	2011-10-20 12:12:15 +0000
+++ lava_test/test_definitions/stream.py	2012-03-08 19:22:33 +0000
@@ -18,18 +18,19 @@ 
 from lava_test.core.runners import TestRunner
 from lava_test.core.tests import Test
 
-URL="http://www.cs.virginia.edu/stream/FTP/Code/stream.c"
+URL = "http://www.cs.virginia.edu/stream/FTP/Code/stream.c"
 INSTALLSTEPS = ['cc stream.c -O2 -fopenmp -o stream']
 DEPS = ['gcc', 'build-essential']
 DEFAULT_OPTIONS = ""
 RUNSTEPS = ['./stream $(OPTIONS)']
 PATTERN = "^(?P<test_case_id>\w+):\W+(?P<measurement>\d+\.\d+)"
 
-streaminst = TestInstaller(INSTALLSTEPS, deps=DEPS, url=URL)
-streamrun = TestRunner(RUNSTEPS,default_options=DEFAULT_OPTIONS)
-streamparser = TestParser(PATTERN,
-               appendall={'units':'MB/s', 'result':'pass'})
-testobj = Test(test_id="stream", installer=streaminst,
-                                  runner=streamrun, parser=streamparser)
-
-
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS, url=URL)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'MB/s', 'result': 'pass'})
+
+testobj = Test(
+    test_id="stream",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/tiobench.py'
--- lava_test/test_definitions/tiobench.py	2011-10-20 18:45:45 +0000
+++ lava_test/test_definitions/tiobench.py	2012-03-08 19:22:35 +0000
@@ -28,29 +28,33 @@ 
 from lava_test.core.tests import Test
 
 
-VERSION="0.3.3"
-URL="http://prdownloads.sourceforge.net/tiobench/tiobench-%s.tar.gz" %(VERSION)
-MD5="bf485bf820e693c79e6bd2a38702a128"
+VERSION = "0.3.3"
+URL = ("http://prdownloads.sourceforge.net/tiobench/"
+       "tiobench-%s.tar.gz") % VERSION
+MD5 = "bf485bf820e693c79e6bd2a38702a128"
 DEFAULT_OPTIONS = "--block=4096 --block=8192 --threads=2 --numruns=2"
-INSTALLSTEPS = ['tar -zxvf tiobench-%s.tar.gz' % VERSION,
-                'cd tiobench-%s && make' % VERSION]
-RUNSTEPS = ["cd tiobench-%s && "\
-            "./tiobench.pl $(OPTIONS)" % (VERSION)]
+INSTALLSTEPS = [
+    'tar -zxvf tiobench-%s.tar.gz' % VERSION,
+    'cd tiobench-%s && make' % VERSION]
+RUNSTEPS = [(
+    "cd tiobench-%s && "
+    "./tiobench.pl $(OPTIONS)") % VERSION]
 
 
 class TIObenchTestParser(TestParser):
+
     def parse(self, artifacts):
         # Pattern to match the test case name
-        pattern1="(?P<test_id>^(Sequential|Random) (Writes|Reads))"
+        pattern1 = "(?P<test_id>^(Sequential|Random) (Writes|Reads))"
         # Pattern to match the parameter details and measurement
-        pattern2=".*?(?P<file_size>\d+)\s+(?P<blks_size>\d+)\s+.*?  "\
-                 "(?P<measurement>((\d|#)+\.?\d*))"
+        pattern2 = (".*?(?P<file_size>\d+)\s+(?P<blks_size>\d+)\s+.*?  "
+                    "(?P<measurement>((\d|#)+\.?\d*))")
         filename = "testoutput.log"
         pat1 = re.compile(pattern1)
         pat2 = re.compile(pattern2)
         tc_id = None
-        with open(filename) as fd:
-            for lineno, line in enumerate(fd, 1):
+        with open(filename) as stream:
+            for lineno, line in enumerate(stream, 1):
                 match1 = pat1.match(line)
                 match2 = pat2.search(line)
                 if match1:
@@ -67,9 +71,13 @@ 
                     self.results['test_results'].append(
                         self.analyze_test_result(results))
 
-tiobench_inst = TestInstaller(INSTALLSTEPS, url=URL,
-    md5=MD5)
-tiobench_run = TestRunner(RUNSTEPS,default_options=DEFAULT_OPTIONS)
-parse = TIObenchTestParser(appendall={'units':'MB/s', 'result':'pass'})
-testobj = Test(test_id="tiobench", test_version=VERSION,
-    installer=tiobench_inst, runner=tiobench_run, parser=parse)
+installer = TestInstaller(INSTALLSTEPS, url=URL, md5=MD5)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TIObenchTestParser(appendall={'units': 'MB/s', 'result': 'pass'})
+
+testobj = Test(
+    test_id="tiobench",
+    test_version=VERSION,
+    installer=installer,
+    runner=runner,
+    parser=parser)
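
The reindented TIObenchTestParser keeps its original two-pattern approach: pattern1 recognises the section headings in tiobench's report ("Sequential Reads", "Random Writes" and so on) and remembers which section the parser is in, while pattern2 pulls the file size, block size and throughput figure out of each data row in that section. A minimal sketch of the section-tracking half only, since the exact column layout of the data rows is not reproduced here; names below are illustrative:

    import re

    pattern1 = "(?P<test_id>^(Sequential|Random) (Writes|Reads))"
    pat1 = re.compile(pattern1)

    current_section = None
    for line in ["Sequential Reads", "... a data row ..."]:
        match = pat1.match(line)
        if match:
            # Remember the heading; data rows that follow belong to it.
            current_section = match.group("test_id")
    print(current_section)   # Sequential Reads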

=== modified file 'lava_test/test_definitions/wifi-enablement.py'
--- lava_test/test_definitions/wifi-enablement.py	2012-02-07 23:12:00 +0000
+++ lava_test/test_definitions/wifi-enablement.py	2012-03-08 19:22:36 +0000
@@ -31,17 +31,29 @@ 
 from lava_test.core.tests import Test
 
 DEFAULT_OPTIONS = ""
-INSTALLSTEPS = ["bzr branch lp:~linaro-foundations/linaro-ubuntu/lava-test-wifi-enablement wifi-enablement"]
-DEPS = ["bzr", "wpasupplicant", "isc-dhcp-client", "wireless-tools", "net-tools"]
+INSTALLSTEPS = [(
+    "bzr branch"
+    " lp:~linaro-foundations/linaro-ubuntu/lava-test-wifi-enablement"
+    " wifi-enablement")]
+DEPS = [
+    "bzr",
+    "wpasupplicant",
+    "isc-dhcp-client",
+    "wireless-tools",
+    "net-tools"]
 RUNSTEPS = ["cd wifi-enablement; sudo bash -x ./run-test.sh"]
 PATTERN = "(?P<test_case_id>[a-zA-Z0-9_-]+):\s(?P<result>\w+)"
 FIXUPS = {
-        "PASS": "pass",
-        "FAIL": "fail"
-        }
-
-testinst = TestInstaller(INSTALLSTEPS, deps=DEPS)
-testrun = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
-testparser = TestParser(PATTERN, fixupdict = FIXUPS)
-testobj = Test(test_id="wifi-enablement", installer=testinst,
-                          runner=testrun, parser=testparser)
+    "PASS": "pass",
+    "FAIL": "fail"
+}
+
+installer = TestInstaller(INSTALLSTEPS, deps=DEPS)
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, fixupdict=FIXUPS)
+
+testobj = Test(
+    test_id="wifi-enablement",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/x11perf.py'
--- lava_test/test_definitions/x11perf.py	2011-10-20 12:12:15 +0000
+++ lava_test/test_definitions/x11perf.py	2012-03-08 19:22:38 +0000
@@ -44,17 +44,18 @@ 
     "-shmputxy500",
 
     "-scroll500",
-    ]
+]
 
 DEFAULT_OPTIONS = "%s %s" % (x11perf_options,  " ".join(x11perf_tests))
 RUNSTEPS = ["x11perf $(OPTIONS)"]
 PATTERN = "trep @.*\(\W*(?P<measurement>\d+.\d+)/sec\):\W+(?P<test_case_id>.+)"
 
-inst = TestInstaller(deps=["x11-apps"])
-run = TestRunner(RUNSTEPS,default_options=DEFAULT_OPTIONS)
-parse = TestParser(PATTERN,
-                                      appendall={'units':'reps/s',
-                                                 'result':'pass'})
+installer = TestInstaller(deps=["x11-apps"])
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'reps/s', 'result': 'pass'})
 
-testobj = Test(test_id="x11perf", installer=inst,
-                                  runner=run, parser=parse)
+testobj = Test(
+    test_id="x11perf",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/test_definitions/xrestop.py'
--- lava_test/test_definitions/xrestop.py	2012-01-19 00:11:33 +0000
+++ lava_test/test_definitions/xrestop.py	2012-03-08 19:22:40 +0000
@@ -31,9 +31,12 @@ 
 PATTERN = "^(?P<test_case_id>\w+):\W+(?P<measurement>\d+)"
 DEFAULT_OPTIONS = ""
 
-xrestopinst = TestInstaller(INSTALLSTEPS, deps=["xrestop"])
-xrestoprun = TestRunner(RUNSTEPS,default_options=DEFAULT_OPTIONS)
-xrestopparser = TestParser(PATTERN,
-                   appendall={'units':'KB', 'result':'pass'})
-testobj = Test(test_id="xrestop", installer=xrestopinst,
-                                  runner=xrestoprun, parser=xrestopparser)
+installer = TestInstaller(INSTALLSTEPS, deps=["xrestop"])
+runner = TestRunner(RUNSTEPS, default_options=DEFAULT_OPTIONS)
+parser = TestParser(PATTERN, appendall={'units': 'KB', 'result': 'pass'})
+
+testobj = Test(
+    test_id="xrestop",
+    installer=installer,
+    runner=runner,
+    parser=parser)

=== modified file 'lava_test/utils.py'
--- lava_test/utils.py	2012-03-08 09:21:36 +0000
+++ lava_test/utils.py	2012-03-08 19:17:28 +0000
@@ -20,7 +20,6 @@ 
 import shutil
 import urllib2
 import urlparse
-import sys
 
 _fake_files = None
 _fake_paths = None
@@ -195,7 +194,7 @@ 
 
         stream = open(os.path.join(self.cache_dir, key), mode)
         return stream
-    
+
     def _key_for_url(self, url):
         return hashlib.sha1(url).hexdigest()
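
Besides dropping the unused "import sys" and some trailing whitespace, the context above shows how the download cache in lava_test.utils is keyed: _key_for_url() reduces a URL to its SHA-1 hex digest, and that digest appears to be used as the file name under cache_dir. A tiny illustration with a made-up URL, written Python 2 style to match the urllib2/urlparse imports above (on Python 3 the URL would have to be encoded to bytes first):

    import hashlib

    url = "http://example.org/some-test-source.tar.gz"   # hypothetical URL
    print(hashlib.sha1(url).hexdigest())   # used as the cache file name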