backport narval branch into default apycot-version-2.0.0
author Sylvain Thénault <sylvain.thenault@logilab.fr>
Fri, 10 Sep 2010 14:14:42 +0200
changeset 607 81ff439e47b3
parent 506 0659f38debf7 (current diff)
parent 606 b74c17b45821 (diff)
child 608 d46f79550253
backport narval branch into default
logformat.py
migration/0.2.0_Any.py
migration/precreate.py
schema/_regproc.postgres.sql
--- a/MANIFEST.in	Wed Jul 28 12:10:03 2010 +0200
+++ b/MANIFEST.in	Fri Sep 10 14:14:42 2010 +0200
@@ -1,9 +1,11 @@
 include *.py
+include narval/*.py
 recursive-include views *.py
-recursive-include doc *.dia
 recursive-include entities *.py
 recursive-include migration *.py
-recursive-include schema *.sql
-recursive-include data external_resources *.gif *.png *.css *.ico *.js *.svg
+recursive-include _apycotlib *.py
+recursive-include ext *.js
+recursive-include data *.gif *.png *.css *.ico *.js *.svg
 recursive-include i18n *.pot *.po
+recursive-include doc *
 recursive-include wdoc *
--- a/__init__.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/__init__.py	Fri Sep 10 14:14:42 2010 +0200
@@ -1,1 +1,13 @@
 """cubicweb-apycot"""
+from cubicweb.schema import ETYPE_NAME_MAP
+ETYPE_NAME_MAP['TestConfigGroup'] = 'TestConfig'
+
+
+try:
+    # development version
+    import _apycotlib
+except ImportError:
+    pass
+else:
+    import sys
+    sys.modules['apycotlib'] = _apycotlib
--- a/__pkginfo__.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/__pkginfo__.py	Fri Sep 10 14:14:42 2010 +0200
@@ -2,9 +2,9 @@
 """cubicweb-apycot application packaging information"""
 
 modname = 'apycot'
-distname = 'cubicweb-apycot'
+distname = 'apycot'
 
-numversion = (1, 10, 0)
+numversion = (2, 0, 0)
 version = '.'.join(str(num) for num in numversion)
 
 license = 'GPL'
@@ -22,7 +22,9 @@
 __depends__ = {'pyro': None,
                'cubicweb': '>= 3.9.0',
                'cubicweb-vcsfile': '>= 0.9.0',
-               'cubicweb-file': None}
+               'cubicweb-file': None,
+               'cubicweb-narval': None,
+               }
 __recommends__ = {'cubicweb-tracker': None,
                   'cubicweb-nosylist': '>= 0.5.0'}
 
@@ -51,3 +53,8 @@
         data_files.append([join(THIS_CUBE_DIR, dirname), listdir(dirname)])
 # Note: here, you'll need to add subdirectories if you want
 # them to be included in the debian package
+
+if isdir('narval'):
+    data_files.append([join('var', 'lib', 'narval', 'plugins'), listdir('narval')])
+if isdir('ext'):
+    data_files.append([join('share', 'apycot', 'ext'), listdir('ext')])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/__init__.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,341 @@
+# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+"""APyCoT, A Pythonic Code Tester
+
+this is the bot part of the code tester, responsible to execute checks
+"""
+__docformat__ = "restructuredtext en"
+
+import os
+import sys
+import stat
+from os.path import exists, join, dirname
+from subprocess import STDOUT, Popen
+from tempfile import TemporaryFile
+
+from logilab.common.textutils import splitstrip
+
+from cubes import narval # ensure narvalbot is importable
+
+from narvalbot.prototype import action, input
+
+# registry of available repositories, preprocessors and checkers
+
+REGISTRY = {'repository': {},
+            'preprocessor': {},
+            'checker': {}}
+
+def register(category, klass):
+    """register a class"""
+    REGISTRY[category][klass.id] = klass
+
+def get_registered(category, name):
+    """get an object by name"""
+    try:
+        return REGISTRY[category][name]
+    except KeyError:
+        raise ConfigError('No object %r in category %r' % (name, category))
+
+# apycot standard exception ####################################################
+
+class ConfigError(Exception):
+    """exception due to a wrong user configuration"""
+
+class SetupException(Exception):
+    """raised in the setup step"""
+
+
+def not_executed_checker(id):
+    """useful helper decorator to force execution to report further action
+    failures into apycot report
+    """
+    def function(step, id=id):
+        plan = step.plan
+        if hasattr(plan, 'apycot'):
+            test = plan.apycot
+            test.run_checker(id, nonexecuted=True)
+    return function
+
+def apycotaction(id, *conditions):
+    """apycotaction : narval action + 'apycot' input being the :class:`Test`
+    instance and 'options' optional input containing configuration options
+    (:class:`narvalbot.elements.Options`)
+    """
+    def decorator(func, id=id):
+        return input('apycot', 'isinstance(elmt, Test)', *conditions)(
+            input('options', 'isinstance(elmt, Options)', optional=True)(
+                action('apycot.%s' % id, nonexecuted=not_executed_checker(id))(func)
+                )
+            )
+    return decorator
+
+
+# check statuses ###############################################################
+
+class TestStatus(object):
+    __all = {}
+
+    def __init__(self, name, order, nonzero):
+        self.name = name
+        self.order = order
+        self.nonzero = nonzero
+        self.__all[name] = self
+
+    def __int__(self):
+        return self.order
+
+    def __nonzero__(self):
+        return self.nonzero
+
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return "<TestStatus %s>" % self.name
+
+    def __cmp__(self, other):
+        return cmp(int(self), int(other))
+
+    @classmethod
+    def get(cls, name):
+        return cls.__all.get(name)
+
+# keep order for bw compat
+SKIPPED = TestStatus('skipped',  -5, False) # Checker not even launched
+KILLED  = TestStatus('killed',   -3, False) # Checker killed (for limit reason)
+ERROR   = TestStatus('error',    -1, False) # Unexpected error during chks exec
+FAILURE = TestStatus("failure",   0, False) # Project failed the check
+NODATA  = TestStatus('nodata',    2, False) # No data found in the project
+PARTIAL = TestStatus('partial',   5, True)  # Project partially passes the check
+SUCCESS = TestStatus('success',  10, True)  # Project passed the check
+
+
+# base class for all apycot objects ############################################
+
+_MARKER = ()
+
+class ApycotObject(object):
+    """base class for apycot checkers / preprocessors"""
+    options_def = {}
+    status = None
+
+    def __init__(self, writer, options=None):
+        self.writer = writer
+        if options is None:
+            options = {}
+        self.options = options
+
+    @staticmethod
+    def merge_status(global_status, status):
+        if global_status is None:
+            return status
+        elif status is None:
+            return global_status
+        else:
+            return min(global_status, status)
+
+    def set_status(self, status):
+        self.status = self.merge_status(self.status, status)
+
+    def record_version_info(self, versionof, version):
+        self.writer.info(version, path=versionof)
+
+    def check_options(self):
+        """check mandatory options have a value (I know I know...) and set
+        defaults
+        """
+        for optname, optdict in self.options_def.iteritems():
+            assert hasattr(optdict, 'get'), "optdict : %s ; self.options : %s" % (optdict, self.options)
+            if optdict.get('required') and not self.options.get(optname):
+                raise ConfigError('missing/empty value for option %r' % optname)
+            if not optname in self.options:
+                self.options[optname] = optdict.get('default')
+            opttype = optdict.get('type')
+            if opttype is None:
+                continue
+            if opttype == 'int':
+                self.options[optname] = int(self.options[optname])
+            elif opttype == 'csv':
+                if self.options[optname]:
+                    if isinstance(self.options[optname], basestring):
+                        self.options[optname] = splitstrip(self.options[optname])
+                else:
+                    self.options[optname] = []
+            else:
+                raise Exception('Unknow option type %s for %s' % (opttype, optname))
+
+
+# base class for external commands handling ####################################
+
+class OutputParser(ApycotObject):
+    non_zero_status_code = ERROR
+    status = SUCCESS
+
+    def __init__(self, writer, options=None, path=None):
+        super(OutputParser, self).__init__(writer, options)
+        self.unparsed = None
+        self.path = path
+
+    def parse_line(self, line):
+        self.unparsed.append(line.strip())
+
+    def parse(self, stream):
+        self.unparsed = []
+        self._parse(stream)
+        return self.status
+
+    def _parse(self, stream):
+        for line in stream:
+            line = line.strip()
+            if line:
+                self.parse_line(unicode(line, 'utf8', 'replace'))
+
+class SimpleOutputParser(OutputParser):
+
+    PREFIX_INFO = ('I',)
+    PREFIX_WARNING = ('W',)
+    PREFIX_FAILURE = ('E',)
+    PREFIX_FATAL = ('F', 'C')
+
+    def map_message(self, mtype, msg):
+        if mtype in self.PREFIX_INFO:
+            self.writer.info(msg, path=self.path)
+        elif mtype in self.PREFIX_WARNING:
+            self.writer.warning(msg, path=self.path)
+        elif mtype in self.PREFIX_FAILURE:
+            self.writer.error(msg, path=self.path)
+            self.set_status(FAILURE)
+        elif mtype in self.PREFIX_FATAL:
+            self.writer.fatal(msg, path=self.path)
+            self.set_status(FAILURE)
+        elif msg:
+            self.unparsed.append(msg)
+
+    def parse_line(self, line):
+        line_parts = line.split(':', 1)
+        if len(line_parts) > 1:
+            mtype, msg = line_parts
+            self.map_message(mtype.strip(), msg.strip())
+        else:
+            self.unparsed.append(line.strip())
+
+
+class Command(ApycotObject):
+    non_zero_status_code = ERROR
+    status = SUCCESS
+
+    def __init__(self, writer, command, parsed_content='merged',
+                 raises=False, shell=False, path=None):
+        super(Command, self).__init__(writer)
+        assert command, command
+        self.command = command
+        self.parsed_content = parsed_content
+        self.raises = raises
+        self.shell = shell
+        self.path = path
+        self._cmd_printed = False
+
+    @property
+    def commandstr(self):
+        if not isinstance(self.command, basestring):
+            return ' '.join(self.command)
+        return self.command
+
+    def run(self):
+        """actually run the task by spawning a subprocess"""
+        os.environ['LC_ALL'] = 'fr_FR.UTF-8' # XXX force utf-8
+        outfile = TemporaryFile(mode='w+', bufsize=0)
+        if self.parsed_content == 'merged':
+            errfile = STDOUT
+        else:
+            errfile = TemporaryFile(mode='w+', bufsize=0)
+        try:
+            cmd = Popen(self.command, bufsize=0, stdout=outfile, stderr=errfile, stdin=open('/dev/null', 'a'), shell=self.shell)
+        except OSError, err:
+            raise ConfigError(err)
+        cmd.communicate()
+        if self.parsed_content == 'merged':
+            outfile.seek(0)
+            self.handle_output(cmd.returncode, outfile, None)
+        else:
+            for stream in (outfile, errfile):
+                stream.seek(0)
+            if not os.fstat(errfile.fileno())[stat.ST_SIZE]:
+                errfile = None
+            if not os.fstat(outfile.fileno())[stat.ST_SIZE]:
+                outfile = None
+            self.handle_output(cmd.returncode, outfile, errfile)
+        return self.status
+
+    def handle_output(self, status, stdout, stderr):
+        stdout, stderr, unparsed = self.process_output(stdout, stderr)
+        cmd = self.commandstr
+        path = self.path or cmd
+        if status:
+            if status > 0:
+                short_msg = u'`%s` returned with status : %s' % (cmd, status)
+                cmd_status = self.non_zero_status_code
+            else:
+                # negative status means the process has been killed by a signal
+                short_msg = u'`%s` killed by signal %s' % (cmd, status)
+                # Get the signal number
+                status *= -1
+                cmd_status = KILLED
+                # XXX we need detection of common limit here
+            msg = self.append_output_messages(short_msg, stdout, stderr, unparsed)
+            self.writer.error(msg, path=path)
+            self.set_status(cmd_status)
+            if self.raises:
+                raise SetupException(short_msg)
+        else:
+            msg = self.append_output_messages(u'`%s` executed successfuly' % cmd,
+                                              stdout, stderr, unparsed)
+            self.writer.debug(msg, path=path)
+
+    def append_output_messages(self, msg, stdout, stderr, unparsed):
+        if stdout is not None:
+            stdout = unicode(stdout.read(), 'utf8', 'replace')
+            if self.parsed_content == 'merged':
+                msg += u'\noutput:\n%s' % stdout
+            else:
+                msg += u'\nstandard output:\n%s' % stdout
+        if stderr is not None:
+            stderr = unicode(stderr.read(), 'utf8', 'replace')
+            msg += u'\nerror output:\n%s' % stderr
+        if unparsed:
+            msg += u'\nunparsed output:\n%s' % unparsed
+        return msg
+
+    def process_output(self, stdout, stderr):
+        return stdout, stderr, None
+
+
+class ParsedCommand(Command):
+    def __init__(self, writer, command, parsercls=SimpleOutputParser, **kwargs):
+        Command.__init__(self,  writer, command, **kwargs)
+        self.parser = parsercls(writer)
+        self.parser.path = self.path
+        self.non_zero_status_code = self.parser.non_zero_status_code
+
+    def process_output(self, stdout, stderr):
+        if stdout is not None and self.parsed_content in ('stdout', 'merged'):
+            self.status = self.parser.parse(stdout)
+            return None, stderr, u'\n'.join(self.parser.unparsed)
+        if stderr is not None and self.parsed_content == 'stderr':
+            self.status = self.parser.parse(stderr)
+            return stdout, None, u'\n'.join(self.parser.unparsed)
+        return stdout, stderr, None
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/atest.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,300 @@
+"""APyCoT task / test
+
+A task is a queued (pending) test
+
+A test defines :
+* a unit of sources to test (a project)
+* a list of checks to apply to this unit
+* how to build the test environment (preprocessing, dependencies...)
+"""
+from __future__ import with_statement
+
+__docformat__ = "restructuredtext en"
+
+import os
+import os.path
+import sys
+import tempfile
+from shutil import rmtree
+
+from logilab.common.proc import ResourceError
+
+from cubes.apycot.__pkginfo__ import version as apycot_version
+
+from apycotlib import (ConfigError, Command, get_registered,
+                       SUCCESS, SKIPPED, ERROR, KILLED)
+
+
+def substitute(value, substitutions):
+    if hasattr(value, 'replace') and value:
+        for key, val in substitutions.iteritems():
+            value = value.replace('${%s}' % key, val)
+    return value
+
+def substitute_dict(dict, substitutions):
+    for key, value in dict.iteritems():
+        dict[key] = substitute(value, substitutions)
+    return dict
+
+def clean_path(path):
+    """remove trailing path separator from path"""
+    if path and path[-1] == os.sep:
+        return path[:-1]
+    return path
+
+def update_path(old_path, new_path):
+    """update sys.path"""
+    if old_path is not None:
+        for path in old_path.split(os.pathsep):
+            try:
+                sys.path.remove(clean_path(path))
+            except ValueError:
+                continue
+    if new_path is not None:
+        new_path = new_path.split(os.pathsep)
+        new_path.reverse()
+        for path in new_path:
+            sys.path.insert(0, clean_path(path))
+
+
+
+class Test(object):
+    """the single source unit test class"""
+
+    def __init__(self, texec, writer, options):
+        # directory where the test environment will be built
+        self.tmpdir = tempfile.mkdtemp(dir=options.get('test_dir'))
+        # test config / tested project environment
+        self.tconfig = texec.configuration
+        self.environment = texec.environment
+        self.branch = texec.branch
+        # IWriter object
+        self.writer = writer
+        # local caches
+        self._configs = {}
+        self._repositories = {}
+        # environment variables as a dictionary
+        self.environ = self.tconfig.apycot_process_environment()
+        self.environ.update(self.environment.apycot_process_environment())
+        self._substitute(self.environment, self.environ)
+        # track environment changes to be able to restore it later
+        # note sys.path is synchronized with the PYTHONPATH environment variable
+        self._tracks = {}
+        # notify some subprocesses they're executed by apycot through an
+        # environment variable
+        os.environ['APYCOT_ROOT'] = self.tmpdir
+        # flag indicating whether to clean test environment after test execution
+        # or if an archive containing it should be uploaded
+        self.keep_test_dir = options.get('keep_test_dir', False)
+        self.archive = options.get('archive', False)
+        # set of preprocessors which have failed
+        self._failed_pp = set()
+        self.executed_checkers = {}
+        self.global_status = SUCCESS
+
+    def __str__(self):
+        return repr(self.apycot_repository())
+
+    def _substitute(self, pe, configdict):
+        substitute_dict(configdict,
+                        {'NAME': pe.name, 'TESTDIR': self.tmpdir,
+                         'SRCDIR': self.apycot_repository(pe).co_path})
+
+    # resource accessors #######################################################
+
+    def apycot_config(self, pe=None):
+        if pe is None:
+            pe = self.environment
+        try:
+            return self._configs[pe.eid]
+        except KeyError:
+            config = self.tconfig.apycot_configuration(pe)
+            self._configs[pe.eid] = config
+            self._substitute(pe, config)
+            return config
+
+    def apycot_repository(self, pe=None):
+        if pe is None:
+            pe = self.environment
+        try:
+            return self._repositories[pe.eid]
+        except KeyError:
+            from apycotlib.repositories import get_repository
+            repdef = {'repository': pe.repository,
+                      'path': pe.vcs_path,
+                      'branch': self.branch}
+            # don't overwrite branch hardcoded on the environment: have to be
+            # done here, not only when starting the plan (eg in entities.py)
+            # since project env may not be the tested project env
+            pecfg = pe.apycot_configuration()
+            if pecfg.get('branch'):
+                repdef['branch'] = pecfg['branch']
+            apyrep = get_repository(repdef)
+            self._repositories[pe.eid] = apyrep
+            return apyrep
+
+    def project_path(self, subpath=False):
+        path = self.apycot_repository().co_path
+        if subpath and self.apycot_config().get('subpath'):
+            return os.path.join(path, self.apycot_config()['subpath'])
+        return path
+
+    # test initialisation / cleanup ############################################
+
+    def setup(self):
+        """setup the test environment"""
+        self.writer.start()
+        self.writer.raw('apycot', apycot_version, 'version')
+        # setup environment variables
+        if self.environ:
+            for key, val in self.environ.iteritems():
+                self.update_env(self.tconfig.name, key, val)
+
+    def clean(self):
+        """clean the test environment"""
+        try:
+            self.writer.end(self.global_status, self.archive and self.tmpdir)
+        except:
+            # XXX log error
+            pass
+        if not self.keep_test_dir:
+            rmtree(self.tmpdir)
+        else:
+            self.writer.execution_info('temporary directory not removed: %s',
+                                       self.tmpdir)
+
+    # environment tracking #####################################################
+
+    def update_env(self, key, envvar, value, separator=None):
+        """update an environment variable"""
+        envvar = envvar.upper()
+        orig_value = os.environ.get(envvar)
+        if orig_value is None:
+            orig_value = ''
+        uid = self._make_key(key, envvar)
+        assert not self._tracks.has_key(uid)
+        if separator is not None:
+            if orig_value:
+                orig_values = orig_value.split(separator)
+            else:
+                orig_values = [] # don't want a list with an empty string
+            if not value in orig_values:
+                orig_values.insert(0, value)
+                self._set_env(uid, envvar, separator.join(orig_values))
+        elif orig_value != value:
+            self._set_env(uid, envvar, value)
+
+    def clean_env(self, key, envvar):
+        """reinitialize an environment variable"""
+        envvar = envvar.upper()
+        uid = self._make_key(key, envvar)
+        if self._tracks.has_key(uid):
+            orig_value = self._tracks[uid]
+            if envvar == 'PYTHONPATH':
+                update_path(os.environ.get(envvar), orig_value)
+            if self.writer:
+                self.writer.debug('Reset %s=%r', envvar, orig_value)
+            if orig_value is None:
+                del os.environ[envvar]
+            else:
+                os.environ[envvar] = self._tracks[uid]
+            del self._tracks[uid]
+
+    def _make_key(self, key, envvar):
+        """build a key for an environment variable"""
+        return '%s-%s' % (key, envvar)
+
+    def _set_env(self, uid, envvar, value):
+        """set a new value for an environment variable
+        """
+        if self.writer:
+            self.writer.debug('Set %s=%r', envvar, value)
+        orig_value = os.environ.get(envvar)
+        self._tracks[uid] = orig_value
+        os.environ[envvar]  = value
+        if envvar == 'PYTHONPATH':
+            update_path(orig_value, value)
+
+    # api to call a particular preprocessor / checker #########################
+
+    def checkout(self, pe):
+        vcsrepo = self.apycot_repository(pe)
+        cocmd = vcsrepo.co_command()
+        if cocmd:
+            Command(self.writer, cocmd, raises=True, shell=True).run()
+        movebranchcmd = vcsrepo.co_move_to_branch_command()
+        if movebranchcmd:
+            Command(self.writer, movebranchcmd, shell=True).run()
+        self.writer.link_to_revision(pe, vcsrepo)
+        self.writer.refresh_log(True)
+
+    def call_preprocessor(self, pptype, penv):
+        cfg = self.apycot_config(penv)
+        ppid = cfg.get(pptype)
+        if ppid is not None:
+            # fetch preprocessors options set on the project environment
+            preprocessor = get_registered('preprocessor', ppid)(self.writer, cfg)
+        else:
+            # XXX log?
+            return
+        path = self.apycot_repository(penv).co_path
+        dependency = path != self.project_path()
+        msg = 'running preprocessor %(pp)s to perform %(pptype)s'
+        msg_data = {'pptype': pptype,
+                    'pp': preprocessor.id,}
+        if dependency:
+            msg + ' on dependency %(pe)s'
+            msg_data['pe'] = penv.name
+        self.writer.info(msg % msg_data, path=path)
+        try:
+            preprocessor.run(self, path)
+        except Exception, ex:
+            msg = '%s while running preprocessor %s: %s'
+            self.writer.fatal(msg, ex.__class__.__name__, preprocessor.id, ex,
+                              path=path, tb=True)
+            self._failed_pp.add(pptype)
+
+    def run_checker(self, id, options=None, nonexecuted=False):
+        """run all checks in the test environment"""
+        if options is None:
+            options = {}
+        else:
+            self._substitute(self.environment, options)
+        check_writer = self.writer.make_check_writer()
+        checker = get_registered('checker', id)(check_writer, options)
+        check_writer.start(checker)
+        if checker.need_preprocessor in self._failed_pp:
+            msg = 'Can\'t run checker %s: preprocessor %s have failed'
+            check_writer.fatal(msg, checker.id, checker.need_preprocessor)
+            check_writer.end(SKIPPED)
+            return checker, SKIPPED # XXX
+        if nonexecuted:
+            check_writer.end(SKIPPED)
+            return checker, SKIPPED # XXX
+        try:
+            checker.check_options()
+            status = checker.check(self)
+            self.executed_checkers[checker.id] = status
+        except ConfigError, ex:
+            msg = 'Config error for %s checker: %s'
+            check_writer.fatal(msg, checker.id, ex)
+            status = ERROR
+        except ResourceError, ex:
+            check_writer.fatal('%s resource limit reached, aborted', ex.limit)
+            status = KILLED
+            raise
+        except MemoryError:
+            check_writer.fatal('memory resource limit reached, arborted')
+            status = KILLED
+            raise
+        except Exception, ex:
+            msg = 'Error while running checker %s: %s'
+            check_writer.fatal(msg, checker.id, ex, tb=True)
+            status = ERROR
+        finally:
+            check_writer.end(status)
+            #globstatus = min(globstatus, status)
+            self.writer.execution_info('%s [%s]', checker.id, status)
+            with self.writer._lock:
+                self.global_status = min(self.global_status, status)
+        return checker, status
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/checkers/__init__.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,126 @@
+"""subpackage containing base checkers (mostly for python code and packaging
+standard used at Logilab)
+"""
+
+__docformat__ = "restructuredtext en"
+
+from os.path import walk, splitext, split, join
+
+from logilab.common.textutils import splitstrip
+from logilab.common.proc import RESOURCE_LIMIT_EXCEPTION
+
+from apycotlib import SUCCESS, NODATA, ERROR, TestStatus, ApycotObject
+
+class BaseChecker(ApycotObject):
+    id = None
+    __type__ = 'checker'
+    need_preprocessor = None
+
+    _best_status = None
+
+    def check(self, test):
+        self.status = None
+        try:
+            setup_status = self.setup_check(test)
+            self.set_status(setup_status)
+            if setup_status is None or setup_status:
+                self.set_status(self.do_check(test))
+                self.version_info()
+        finally:
+            self.set_status(self.tear_down_check(test))
+        # do it last to let checker do whatever they want to do.
+        new_status = self.merge_status(self.status, self.best_status)
+        if new_status is not self.status:
+            self.writer.info("Configuration's setting downgrade %s checker status '\
+                        'from <%s> to <%s>" , self.id, self.status, new_status)
+            self.set_status(new_status)
+        return self.status
+
+    def _get_best_status(self):
+        best_status = self._best_status
+        if best_status is None:
+            return None
+        if not isinstance(best_status, TestStatus):
+            best_status = TestStatus.get(best_status)
+        return best_status
+
+    def _set_best_status(self, value):
+        if not isinstance(value, TestStatus):
+            value = TestStatus.get(value)
+        self._best_status = value
+
+    best_status = property(_get_best_status, _set_best_status)
+
+    def version_info(self):
+        """hook for checkers to add their version information"""
+
+    def do_check(self, test):
+        """actually check the test"""
+        raise NotImplementedError("%s must defines a do_check method" % self.__class__)
+
+    def setup_check(self, test):
+        pass
+
+    def tear_down_check(self, test):
+        pass
+
+
+class AbstractFilteredFileChecker(BaseChecker):
+    """check a directory file by file, with an extension filter
+    """
+    checked_extensions =  None
+    options_def = {
+        'ignore': {
+            'type': 'csv', 'default': ['CVS', '.hg', '.svn'],
+            'help': 'comma separated list of files or directories to ignore',
+            },
+        }
+
+    def __init__(self, writer, options=None, extensions=None):
+        BaseChecker.__init__(self, writer, options)
+        self.extensions = extensions or self.checked_extensions
+        if isinstance(self.extensions, basestring):
+            self.extensions = (self.extensions,)
+        self._res = None
+        self._safe_dir = set()
+
+    def files_root(self, test):
+        return test.project_path(subpath=True)
+
+    def do_check(self, test):
+        """run the checker against <path> (usually a directory)
+
+        return true if the test succeeded, else false.
+        """
+        self.set_status(SUCCESS)
+        self._nbanalyzed = 0
+        ignored = self.options.get('ignore')
+        def walk_handler(arg, directory, fnames):
+            """walk callback handler"""
+            full_path = [(filename, join(directory, filename)) for filename in fnames]
+            for fname, fpath in full_path:
+                for ign_pat in ignored:
+                    if ign_pat.endswith((fpath, fname)):
+                        fnames.remove(fname) # fnames need to be modified in place
+            for filename in fnames:
+                ext = splitext(filename)[1]
+                if self.extensions is None or ext in self.extensions:
+                    try:
+                        self.set_status(self.check_file(join(directory, filename)))
+                    except RESOURCE_LIMIT_EXCEPTION:
+                        raise
+                    except Exception, ex:
+                        self.writer.fatal(u"%s", ex, path=filename, tb=True)
+                        self.set_status(ERROR)
+                    self._nbanalyzed += 1
+
+        files_root = self.files_root(test)
+        self.writer.raw('file root', files_root)
+        walk(self.files_root(test), walk_handler, files_root)
+        self.writer.raw('total files analyzed', self._nbanalyzed)
+        if self._nbanalyzed <= 0:
+            self.set_status(NODATA)
+        return self.status
+
+    def check_file(self, path):
+        raise NotImplementedError()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/checkers/jslint.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,181 @@
+import re
+import os
+import logging
+from subprocess import call
+from os.path import exists, dirname, join, abspath
+from re import compile
+
+from cubicweb import devtools # so cubes get importable
+from cubes import apycot # so apycotlib get importable
+
+from apycotlib import register, OutputParser, ParsedCommand, FAILURE
+from apycotlib.checkers import AbstractFilteredFileChecker
+
+# in source installation, jslint.js is in apycot/ext, we're currently in
+# apycot/_apycotlib/checkers
+JSLINT_PATH = join(dirname(dirname(dirname(abspath(__file__)))), 'ext', 'jslint.js')
+if not exists(JSLINT_PATH):
+    JSLINT_PATH = '/usr/share/apycot/ext/jslint.js'
+
+class JsLintParser(OutputParser):
+    """Simple Parser class interpreting
+
+    Lint at line 8 character 1: 'CubicWeb' is not defined.
+    CubicWeb.require('htmlhelpers.js');
+    """
+    non_zero_status_code = FAILURE
+
+    RE_MSG = re.compile(r'^Lint at line (\d+) character (\d+):\s*(.*)$')
+    #RE_NO_ISSUE = re.compile(r'^jslint: No problems found in ')
+
+    JSLINT_MSG = (
+        (logging.INFO,    "Unnecessary semicolon.",),
+        (logging.INFO,    "Unnecessary escapement.",),
+        (logging.INFO,    "The 'language' attribute is deprecated.",),
+        (logging.INFO,    "Inner functions should be listed at the top of the outer function.",),
+        (logging.INFO,    compile(r"Don't use extra leading zeros '.*?'\."),),
+        (logging.INFO,    "Confusing plusses.",),
+        (logging.INFO,    "Confusing minusses.",),
+        (logging.INFO,    compile(r"A trailing decimal point can be confused with a dot '.*?'i\."),),
+        (logging.INFO,    compile(r"A leading decimal point can be confused with a dot '\..*?'\."),),
+        (logging.INFO,    compile(r"'.*?' is better written without quotes\."),),
+        (logging.INFO,    compile(r"['.*?'] is better written in dot notation\."),),
+        (logging.INFO,    "A dot following a number can be confused with a decimal point.",),
+        (logging.WARNING, "Weird construction. Delete 'new'.",),
+        (logging.WARNING, "Use the object literal notation {}.",),
+        (logging.WARNING, "Use the isNaN function to compare with NaN.",),
+        (logging.WARNING, "Use the array literal notation [].",),
+        (logging.WARNING, compile(r"Use '.*?' to compare with '.*?'\."),),
+        (logging.WARNING, compile(r"Unrecognized tag '<.*?>'\."),),
+        (logging.WARNING, compile(r"Unrecognized attribute '<.*? .*?>'\."),),
+        (logging.WARNING, compile(r"Unreachable '.*?' after '.*?'\."),),
+        (logging.WARNING, "This 'switch' should be an 'if'.",),
+        (logging.WARNING, "'new' should not be used as a statement\.",),
+        (logging.WARNING, compile(r"Label '.*?' on .*? statement\."),),
+        (logging.WARNING, compile(r"Label '.*?' looks like a javascript url\."),),
+        (logging.WARNING, "JavaScript URL.",),
+        (logging.WARNING, "Implied eval is evil. Pass a function instead of a string.",),
+        (logging.WARNING, compile(r"Identifier .*? already declared as .*?\."),),
+        (logging.WARNING, "HTML case error.",),
+        (logging.WARNING, "Expected to see a statement and instead saw a block.",),
+        (logging.WARNING, compile(r"Expected to see a '\(' or '=' or ':' or ',' or '\[' preceding a regular expression literal, and instead saw '.*?'\."),),
+        (logging.WARNING, compile(r"Expected '.*?' to match '.*?' from line .*? and instead saw '.*?'\."),),
+        (logging.WARNING, compile(r"Expected '.*?' to have an indentation at .*? instead at .*?\."),),
+        (logging.WARNING, compile(r"Expected an operator and instead saw '.*?'\."),),
+        (logging.WARNING, "Expected an identifier in an assignment and instead saw a function invocation.   ",),
+        (logging.WARNING, compile(r"Expected an identifier and instead saw '.*?' (a reserved word)\."),),
+        (logging.WARNING, compile(r"Expected an identifier and instead saw '.*?'\."),),
+        (logging.WARNING, "Expected an assignment or function call and instead saw an expression.",),
+        (logging.WARNING, "Expected a 'break' statement before 'default'.",),
+        (logging.WARNING, "Expected a 'break' statement before 'case'.",),
+        (logging.WARNING, compile(r"Expected '.*?' and instead saw '.*?'\."),),
+        (logging.WARNING, "eval is evil.",),
+        (logging.WARNING, "Each value should have its own case label.",),
+        (logging.WARNING, compile(r"Do not use the .*? function as a constructor\."),),
+        (logging.WARNING, "document.write can be a form of eval.",),
+        (logging.WARNING, compile(r"Control character in string: .*?\."),),
+        (logging.WARNING, "All 'debugger' statements should be removed.",),
+        (logging.WARNING, "Adsafe restriction.",),
+        (logging.WARNING, compile(r"Adsafe restricted word '.*?'\."),),
+        (logging.WARNING, "A constructor name should start with an uppercase letter.",),
+        (logging.WARNING, compile(r".*? (.*?% scanned)\."),),
+        (logging.FATAL,   "What the hell is this?",),
+        (logging.ERROR,   compile(r"Variable .*? was used before it was declared\."),),
+        (logging.ERROR,   compile(r"Unmatched '.*?'\."),),
+        (logging.ERROR,   compile(r"Unexpected use of '.*?'\."),),
+        (logging.ERROR,   compile(r"Unexpected space after '.*?'\."),),
+        (logging.ERROR,   "Unexpected early end of program.",),
+        (logging.ERROR,   compile(r"Unexpected characters in '.*?'\."),),
+        (logging.ERROR,   compile(r"Unexpected '.*?'\."),),
+        (logging.ERROR,   compile(r"Undefined .*? '.*?'\."),),
+        (logging.ERROR,   compile(r"Unclosed string\."),),
+        (logging.ERROR,   "Unclosed comment.",),
+        (logging.ERROR,   "Unbegun comment.",),
+        (logging.ERROR,   "The Function constructor is eval.",),
+        (logging.ERROR,   "Nested comment.",),
+        (logging.ERROR,   compile(r"Missing space after '.*?'\."),),
+        (logging.ERROR,   "Missing semicolon.",),
+        (logging.ERROR,   "Missing radix parameter.",),
+        (logging.ERROR,   "Missing quote.",),
+        (logging.ERROR,   "Missing ':' on a case clause.",),
+        (logging.ERROR,   "Missing 'new' prefix when invoking a constructor.",),
+        (logging.ERROR,   "Missing name in function statement.",),
+        (logging.ERROR,   "Missing '()' invoking a constructor.",),
+        (logging.ERROR,   "Missing close quote on script attribute.",),
+        (logging.ERROR,   compile(r"Missing boolean after '.*?'\."),),
+        (logging.ERROR,   compile(r"Missing ':' after '.*?'\."),),
+        (logging.ERROR,   compile(r"Missing '.*?'\."),),
+        (logging.ERROR,   compile(r"Line breaking error '.*?'\."),),
+        (logging.ERROR,   "Function statements are not invocable. Wrap the function expression in parens.   ",),
+        (logging.ERROR,   "Extra comma.",),
+        (logging.ERROR,   compile(r"Bad value '.*?'\."),),
+        (logging.ERROR,   "Bad structure.",),
+        (logging.ERROR,   "Bad regular expression.",),
+        (logging.ERROR,   compile(r"Bad number '.*?'\."),),
+        (logging.ERROR,   compile(r"Bad name '.*?'\."),),
+        (logging.ERROR,   compile(r"Bad jslint option '.*?'\."),),
+        (logging.ERROR,   "Bad invocation.",),
+        (logging.ERROR,   compile(r"Bad extern identifier '.*?'\."),),
+        (logging.ERROR,   "Bad escapement.",),
+        (logging.ERROR,   compile(r".*? .*? declared in a block\."),),
+        (logging.ERROR,   "Bad constructor.",),
+        (logging.ERROR,   "Bad assignment.",),
+        (logging.ERROR,   compile(r"Attribute '.*?' does not belong in '<.*?>'\."),),
+        (logging.ERROR,   "Assignment in control part.",),
+        (logging.ERROR,   compile(r"A '<.*?>' must be within '<.*?>'\."),),
+    )
+
+    @classmethod
+    def get_msg_level(cls, msg, default=logging.ERROR):
+        msg = msg.strip()
+        for level, msg_pat in cls.JSLINT_MSG:
+            if (hasattr(msg_pat, 'match') and msg_pat.match(msg)) or msg == msg_pat:
+                return level
+        else:
+            return default
+
+    def __init__(self, *args, **kwargs):
+        super(JsLintParser, self).__init__(*args, **kwargs)
+        # line, char_idx, msg
+        self._ctx  = None
+
+    def parse_line(self, line):
+        if not line:
+            self._ctx = None
+            return
+        match = self.RE_MSG.match(line)
+        if match:
+            self._ctx = match.groups()
+        elif self._ctx is not None:
+            filepath = self.path
+            lineno = '%s:%s' % (self._ctx[0], self._ctx[1])
+            msg  = self._ctx[2]
+            level = self.get_msg_level(msg)
+            self.writer.log(level, msg, path=filepath, line=lineno)
+            self.set_status(FAILURE)
+        else:
+            self.unparsed.append(line)
+
+
+
+if not call(['which', 'rhino'], stdout=file(os.devnull, 'w')):
+
+    class JsLintChecker(AbstractFilteredFileChecker):
+        """Js Lint checker for each *.js file"""
+
+        id = 'jslint'
+        need_preprocessor = 'build_js'
+        checked_extensions = ('.js', )
+
+        def check_file(self, path):
+            command = ['rhino', JSLINT_PATH, path]
+            return ParsedCommand(self.writer, command, parsercls=JsLintParser,
+                                 path=path).run()
+
+        def version_info(self):
+            super(JsLintChecker, self).version_info()
+            self.record_version_info('jslint', '2010-04-06')
+
+    register('checker', JsLintChecker)
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/checkers/python.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,718 @@
+"""checkers for python source files
+
+:organization: Logilab
+:copyright: 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: General Public License version 2 - http://www.gnu.org/licenses
+"""
+from __future__ import with_statement
+
+__docformat__ = "restructuredtext en"
+
+import sys
+import os
+import re
+from commands import getoutput
+from os.path import join, exists, abspath, dirname, split
+from test import test_support
+from warnings import warn
+
+from logilab.common.testlib import find_tests
+from logilab.common.modutils import get_module_files
+from logilab.common.fileutils import norm_read
+from logilab.common.shellutils import pushd
+from logilab.common.decorators import cached
+from logilab.common.compat import any
+from logilab.common.proc import RESOURCE_LIMIT_EXCEPTION
+
+try:
+    from logilab.devtools.lib.pkginfo import PackageInfo
+    from logilab.devtools.__pkginfo__ import version as devtools_version
+except ImportError:
+    devtools_version = 'nc'
+
+try:
+    # development version
+    from logilab.devtools.lib import coverage
+    COVFILE = coverage.__file__.replace('.pyc', '.py')
+except ImportError:
+    try:
+        # debian installed version
+        import coverage
+        COVFILE = coverage.__file__.replace('.pyc', '.py')
+    except ImportError:
+        coverage = None
+
+from apycotlib import register
+from apycotlib import SUCCESS, FAILURE, PARTIAL, NODATA, ERROR
+from apycotlib import SimpleOutputParser, ParsedCommand
+from apycotlib.checkers import BaseChecker, AbstractFilteredFileChecker
+
+
+def pyinstall_path(test):
+    path = _pyinstall_path(test)
+    if not exists(path):
+        raise Exception('path %s doesn\'t exist' %  path)
+    return path
+
+@cached
+def _pyinstall_path(test):
+    """return the project's installation path"""
+    config = test.apycot_config()
+    if 'install_path' in config:
+        return config['install_path']
+    modname = config.get('python_modname')
+    if modname:
+        return join(test.tmpdir, 'local', 'lib', 'python', *modname.split('.'))
+    elif exists(join(test.tmpdir, test.project_path(), '__pkginfo__.py')):
+        pkginfo = PackageInfo(directory=join(test.tmpdir, test.project_path()))
+        modname = getattr(pkginfo, 'modname', None)
+        package = getattr(pkginfo, 'subpackage_of', None)
+        if modname and package:
+            modname = '%s.%s' % (package, modname)
+        if modname:
+            path = join(test.tmpdir, 'local', 'lib', 'python',
+                        *modname.split('.'))
+            cfg = test.apycot_config()
+            if cfg.get('subpath'):
+                path = join(path, cfg['subpath'])
+        return path
+    return join(test.tmpdir, test.project_path(subpath=True))
+
+def pyversion_available(python):
+    return not os.system('%s -V 2>/dev/null' % python)
+
+
+class PythonSyntaxChecker(AbstractFilteredFileChecker):
+    """check the syntax of python files
+
+       Each checked .py file is compiled with the builtin compile(); the
+       check fails for a file if compilation raises a SyntaxError.
+    """
+
+    id = 'python_syntax'
+    checked_extensions = ('.py', )
+
+    def check_file(self, filepath):
+        """try to compile the given file to see if it's syntactically correct"""
+        # Try to compile it. If compilation fails, then there's a
+        # SyntaxError
+        try:
+            compile(norm_read(filepath) + '\n', filepath, "exec")
+            return SUCCESS
+        except SyntaxError, error:
+            self.writer.error(error.msg, path=filepath, line=error.lineno)
+            return FAILURE
+
+    def version_info(self):
+        self.record_version_info('python', sys.version)
+
+register('checker', PythonSyntaxChecker)
+
+
+class PyTestParser(SimpleOutputParser):
+    status = NODATA
+    non_zero_status_code = FAILURE
+    # search for following output:
+    #
+    # 'Ran 42 test cases in 0.07s (0.07s CPU), 3 errors, 31 failures, 3 skipped'
+    regex = re.compile(
+        r'Ran (?P<total>[0-9]+) test cases '
+        'in (?P<time>[0-9]+(.[0-9]+)?)s \((?P<cputime>[0-9]+(.[0-9]+)?)s CPU\)'
+        '(, )?'
+        '((?P<errors>[0-9]+) errors)?'
+        '(, )?'
+        '((?P<failures>[0-9]+) failures)?'
+        '(, )?'
+        '((?P<skipped>[0-9]+) skipped)?'
+        )
+
+    total, failures, errors, skipped = 0, 0, 0, 0
+
+    def __init__(self, writer, options=None):
+        super(PyTestParser, self).__init__(writer, options)
+        self.total    = 0
+        self.failures = 0
+        self.skipped  = 0
+        self.errors   = 0
+
+    def _parse(self, stream):
+        self.status = None
+        super(PyTestParser, self)._parse(stream)
+        if self.errors or self.failures:
+            self.set_status(FAILURE)
+        elif self.skipped:
+            self.set_status(PARTIAL)
+        elif not self.total:
+            self.set_status(NODATA)
+        elif self.total >= 0:
+            self.set_status(SUCCESS)
+
+    @property
+    def success(self):
+        return max(0, self.total - sum(( self.failures, self.errors,
+                                         self.skipped,)))
+    def add_junk(self, line):
+        if any(c for c in line if c not in 'EFS. \n\t\r-*'):
+            self.unparsed.append(line)
+
+    def extract_tests_status(self, values):
+        for status in ('failures', 'errors', 'skipped'):
+            try:
+                setattr(self, status,
+                        max(getattr(self, status), int(values[status])))
+            except TypeError:
+                pass
+
+    def parse_line(self, line):
+        match = self.regex.match(line)
+        if match is not None:
+            values = match.groupdict()
+            total = int(values['total'])
+            self.total += total
+            self.extract_tests_status(values)
+        else:
+            self.add_junk(line)
+
+PYVERSIONS_OPTIONS = {
+    'tested_python_versions': {
+        'type': 'csv',
+        'help': ('comma separated list of python version (such as 2.5) that '
+                 'should be considered for testing.'),
+        },
+    'ignored_python_versions': {
+        'type': 'csv',
+        'help': ('comma separated list of python version (such as 2.5) that '
+                 'should be ignored for testing when '
+                 'use_pkginfo_python_versions is set to 1.'),
+        },
+    'use_pkginfo_python_versions': {
+        'type': 'int', 'default': True,
+        'help': ('0/1 flag telling if tested python version should be '
+                 'determinated according to __pkginfo__.pyversion of the '
+                 'tested project. This option is ignored if tested_python_versions '
+                 'is set.'),
+        },
+    'pycoverage': {
+        'type': 'int', 'default': False,
+        'help': ('Tell if test should be run with pycoverage  to gather '
+                 'coverage data.'),
+        },
+    }
+
+class PyTestChecker(BaseChecker):
+    """check that unit tests of a python package succeed using the pytest command
+    (from logilab.common)
+    """
+
+    id = 'pytest'
+    need_preprocessor = 'install'
+    parsercls = PyTestParser
+    parsed_content = 'stdout'
+    options_def = PYVERSIONS_OPTIONS.copy()
+    options_def.update({
+        'pytest.extra_argument': {
+            'type': 'string',
+            'help': ('extra argument to give to pytest. Add this option multiple '
+                     'times in the correct order to give several arguments.'),
+            },
+        })
+
+    def __init__(self, writer, options=None):
+        BaseChecker.__init__(self, writer, options)
+        self._path = None
+        self.test = None
+
+    @property
+    @cached
+    def pyversions(self):
+        tested_pyversions = self.options.get("tested_python_versions")
+        if tested_pyversions:
+            pyversions = set(tested_pyversions)
+        elif self.options.get("use_pkginfo_python_versions"):
+            try:
+                pkginfodir = dirname(self.test.environ['pkginfo'])
+            except KeyError:
+                pkginfodir = self.test.project_path()
+            try:
+                pkginfo = PackageInfo(directory=pkginfodir)
+                pyversions = set(pkginfo.pyversions)
+            except (NameError, ImportError):
+                pyversions = set()
+            ignored_pyversions = self.options.get("ignored_python_versions")
+            if ignored_pyversions:
+                ignored_pyversions = set(ignored_pyversions)
+                ignored_pyversions = pyversions.intersection(
+                    ignored_pyversions)
+                if ignored_pyversions:
+                    for py_ver in ignored_pyversions:
+                        self.writer.debug("python version %s ignored", py_ver)
+                    pyversions.difference_update(ignored_pyversions)
+        else:
+            pyversions = None
+        if pyversions:
+            pyversions_ = []
+            for pyver in pyversions:
+                python = 'python%s' % pyver
+                if not pyversion_available(python):
+                    self.writer.error('config asked for %s, but it\'s not available', pyver)
+                else:
+                    pyversions_.append(python)
+            pyversions = pyversions_
+        else:
+            pyversions = [sys.executable]
+        return pyversions
+
+    def version_info(self):
+        if self.pyversions:
+            self.record_version_info('python', ', '.join(self.pyversions))
+
+    def enable_coverage(self):
+        if self.options.get('pycoverage') and coverage:
+            # save back location of the coverage data file for usage in
+            # narval action
+            self.coverage_data = join(testdir, '.coverage')
+            return True
+        return False
+
+    def setup_check(self, test):
+        """run the checker against <path> (usually a directory)"""
+        test_support.verbose = 0
+        self.test = test
+        if not self.pyversions:
+            self.writer.error('no required python version available')
+            return ERROR
+        return SUCCESS
+
+    def do_check(self, test):
+        if self.enable_coverage():
+            command = ['-c', 'from logilab.common.pytest import run; import sys; sys.argv=["pytest", "--coverage"]; run()']
+        else:
+            command = ['-c', 'from logilab.common.pytest import run; run()']
+        extraargs = self.options.get("pytest.extra_argument", [])
+        if not isinstance(extraargs, list):
+            command.append(extraargs)
+        else:
+            command += extraargs
+        cwd = os.getcwd()
+        # XXX may cause unexpected bug in executed concurrently with another step
+        os.chdir(pyinstall_path(test))
+        try:
+            status = SUCCESS
+            testresults = {'success': 0, 'failures': 0,
+                           'errors': 0, 'skipped': 0}
+            total = 0
+            for python in self.pyversions:
+                cmd = self.run_test(command, python)
+                for rtype in testresults:
+                    total += getattr(cmd.parser, rtype)
+                    testresults[rtype] += getattr(cmd.parser, rtype)
+                status = self.merge_status(status, cmd.status)
+            self.execution_info(total, testresults)
+            return status
+        finally:
+            os.chdir(cwd)
+
+    def execution_info(self, total, testresults):
+        self.writer.raw('total_test_cases', total, 'result')
+        self.writer.raw('succeeded_test_cases', testresults['success'], 'result')
+        self.writer.raw('failed_test_cases', testresults['failures'], 'result')
+        self.writer.raw('error_test_cases', testresults['errors'], 'result')
+        self.writer.raw('skipped_test_cases', testresults['skipped'], 'result')
+
+    def get_command(self, command, python):
+        return [python, '-W', 'ignore'] + command
+
+    def run_test(self, command, python='python'):
+        """execute the given test file and parse output to detect failed /
+        succeed test cases
+        """
+        if isinstance(command, basestring):
+            command = [command]
+        command = self.get_command(command, python)
+        cmd = ParsedCommand(self.writer, command,
+                            parsercls=self.parsercls,
+                            parsed_content=self.parsed_content,
+                            path=self._path)
+        #cmd.parser.path = join(self._path, command[0]) # XXX
+        cmd.run()
+        cmd.set_status(cmd.parser.status)
+        return cmd
+
+register('checker', PyTestChecker)
+
+
+class PyUnitTestParser(PyTestParser):
+    result_regex = re.compile(
+        r'(OK|FAILED)'
+        '('
+        ' \('
+        '(failures=(?P<failures>[0-9]+))?'
+        '(, )?'
+        '(errors=(?P<errors>[0-9]+))?'
+        '(, )?'
+        '(skipped=(?P<skipped>[0-9]+))?'
+        '\)'
+        ')?')
+
+    total_regex = re.compile(
+        'Ran (?P<total>[0-9]+) tests?'
+        ' in (?P<time>[0-9]+(.[0-9]+)?s)')
+
+    def parse_line(self, line):
+        match = self.total_regex.match(line)
+        if match is not None:
+            self.total = int(match.groupdict()['total'])
+            return
+        match = self.result_regex.match(line)
+        if match is not None:
+            self.extract_tests_status(match.groupdict())
+            return
+        self.add_junk(line)
+
+
+class PyUnitTestChecker(PyTestChecker):
+    """check that unit tests of a python package succeed
+
+    Execute tests found in the "test" or "tests" directory of the package. The
+    check succeed if no test cases failed. Note each test module is executed by
+    a spawned python interpreter and the output is parsed, so tests should use
+    the default text output of the unittest framework, and avoid messages on
+    stderr.
+
+    spawn unittest and parse output (expect a standard TextTestRunner)
+    """
+
+    id = 'pyunit'
+    parsed_content = 'stderr'
+    parsercls = PyUnitTestParser
+    options_def = PYVERSIONS_OPTIONS.copy()
+    options_def.update({
+        'test_dirs': {
+            'type': 'csv', 'default': ('test', 'tests'),
+            'help': ('comma separated list of directories where tests could be '
+                     'find. Search in "test" and "tests by default.'),
+            },
+        })
+
+    def do_check(self, test):
+        status = SUCCESS
+        testdirs = self.options.get("test_dirs")
+        basepath = test.project_path(subpath=True)
+        for testdir in testdirs:
+            testdir = join(basepath, testdir)
+            if exists(testdir):
+                self._path = testdir
+                with pushd(testdir):
+                    _status = self.run_tests()
+                    status = self.merge_status(status, _status)
+                break
+        else:
+            self.writer.error('no test directory', path=basepath)
+            status = NODATA
+        return status
+
+    def run_tests(self):
+        """run a package test suite
+        expect to be in the test directory
+        """
+        tests = find_tests('.')
+        if not tests:
+            self.writer.error('no test found', path=self._path)
+            return NODATA
+        status = SUCCESS
+        testresults = {'success': 0, 'failures': 0,
+                       'errors': 0, 'skipped': 0}
+        total = 0
+        for python in self.pyversions:
+            for test_file in tests:
+                cmd = self.run_test(test_file + '.py', python)
+                total += cmd.parser.total
+                for rtype in testresults:
+                    testresults[rtype] += getattr(cmd.parser, rtype)
+                if cmd.status == NODATA:
+                    self.writer.error('no test found', path=test_file)
+                status = self.merge_status(status, cmd.status)
+        self.execution_info(total, testresults)
+        return status
+
+    def get_command(self, command, python):
+        if self.enable_coverage():
+            return [python, '-W', 'ignore',  COVFILE, '-x',
+                    '-p', pyinstall_path(self.test)] + command
+        return [python, '-W', 'ignore'] + command
+
+
+register('checker', PyUnitTestChecker)
+
+
+class PyDotTestParser(PyUnitTestParser):
+    line_regex = re.compile(
+            r'(?P<filename>\w+\.py)(\[(?P<ntests>\d+)\] | - )(?P<results>.*)')
+
+    # XXX overwrite property
+    success = 0
+
+    def _parse(self, stream):
+        for _, _, _, results in self.line_regex.findall(stream.read()):
+            if results == "FAILED TO LOAD MODULE":
+                self.errors += 1
+            else:
+                self.success += results.count('.')
+                self.total += results.count('.')
+                self.failures += results.count('F')
+                self.total += results.count('F')
+                self.errors += results.count('E')
+                self.total += results.count('E')
+                self.skipped += results.count('s')
+                self.total += results.count('s')
+        if self.failures or self.errors:
+            self.set_status(FAILURE)
+        elif self.skipped:
+            self.set_status(PARTIAL)
+        elif not self.success:
+            self.set_status(NODATA)
+
+
+class PyDotTestChecker(PyUnitTestChecker):
+    """check that py.test based unit tests of a python package succeed
+
+    spawn py.test and parse output (expect a standard TextTestRunner)
+    """
+    need_preprocessor = 'install'
+    id = 'py.test'
+    parsercls = PyDotTestParser
+    parsed_content = 'stdout'
+    options_def = PYVERSIONS_OPTIONS.copy()
+
+    def get_command(self, command, python):
+        # XXX coverage
+        return ['py.test', '--exec=%s' % python, '--nomagic', '--tb=no'] + command
+
+register('checker', PyDotTestChecker)
+
+
+class PyLintChecker(BaseChecker):
+    """check that the python package as a decent pylint evaluation
+    """
+    need_preprocessor = 'install'
+    id = 'pylint'
+    options_def = {
+        'pylintrc': {
+            'help': ('path to a pylint configuration file.'),
+            },
+        'pylint.threshold': {
+            'type': 'int', 'default': 7,
+            'help': ('integer between 1 and 10 telling expected pylint note to '
+                     'pass this check. Default to 7.'),
+         },
+        'pylint.show_categories': {
+            'type': 'csv', 'default': ['E', 'F'],
+            'help': ('comma separated list of pylint message categories to add to '
+                     'reports. Default to error (E) and failure (F).'),
+         },
+        'pylint.additional_builtins': {
+            'type': 'csv',
+            'help': ('comma separated list of additional builtins to give to '
+                     'pylint.'),
+            },
+        'pylint.disable': {
+            'type': 'csv',
+            'help': ('comma separated list of pylint message id that should be '
+                     'ignored.'),
+            },
+        'pylint.ignore': {
+            'type': 'csv',
+            'help': 'comma separated list of files or directories to ignore',
+            },
+        }
+
+    def version_info(self):
+        self.record_version_info('pylint', pylint_version)
+
+    def do_check(self, test):
+        """run the checker against <path> (usually a directory)"""
+        threshold = self.options.get('pylint.threshold')
+        pylintrc_path = self.options.get('pylintrc')
+        linter = PyLinter(pylintrc=pylintrc_path)
+        # register checkers
+        checkers.initialize(linter)
+        # load configuration
+        package_wd_path = test.project_path()
+        if exists(join(package_wd_path, 'pylintrc')):
+            linter.load_file_configuration(join(package_wd_path, 'pylintrc'))
+        else:
+            linter.load_file_configuration()
+        linter.set_option('persistent', False)
+        linter.set_option('reports', 0, action='store')
+        linter.quiet = 1
+        # set file or dir to ignore
+        for option in ('ignore', 'additional_builtins', 'disable'):
+            value = self.options.get('pylint.' + option)
+            if value is not None:
+                linter.global_set_option(option.replace('_', '-'), ','.join(value))
+        # message categories to record
+        categories = self.options.get('pylint.show_categories')
+        linter.set_reporter(MyLintReporter(self.writer, test.tmpdir, categories))
+        # run pylint
+        linter.check(pyinstall_path(test))
+        try:
+            note = eval(linter.config.evaluation, {}, linter.stats)
+            self.writer.raw('pylint.evaluation', '%.2f' % note, 'result')
+        except ZeroDivisionError:
+            self.writer.raw('pylint.evaluation', '0', 'result')
+            note = 0
+        except RESOURCE_LIMIT_EXCEPTION:
+            raise
+        except Exception:
+            self.writer.error('Error while processing pylint evaluation',
+                              path=test.project_path(subpath=True), tb=True)
+            note = 0
+        self.writer.raw('statements', '%i' % linter.stats['statement'], 'result')
+        if note < threshold:
+            return FAILURE
+        return SUCCESS
+
+try:
+    from pylint import checkers
+    from pylint.lint import PyLinter
+    from pylint.__pkginfo__ import version as pylint_version
+    from pylint.interfaces import IReporter
+    register('checker', PyLintChecker)
+
+    class MyLintReporter(object):
+        """a partial pylint writer (implements only the message method, not
+        the methods necessary to display layouts)
+        """
+        __implements__ = IReporter
+
+        def __init__(self, writer, basepath, categories):
+            self.writer = writer
+            self.categories = set(categories)
+            self._to_remove = len(basepath) + 1 # +1 for the leading "/"
+
+        def add_message(self, msg_id, location, msg):
+            """ manage message of different type and in the context of path """
+            if not msg_id[0] in self.categories:
+                return
+            path, line = location[0], location[-1]
+            path = path[self._to_remove:]
+            if msg_id[0] == 'I':
+                self.writer.info(msg, path=path, line=line)
+            elif msg_id[0]  == 'E':
+                self.writer.error(msg, path=path, line=line)
+            elif msg_id[0] == 'F':
+                self.writer.fatal(msg, path=path, line=line)
+            else: # msg_id[0] in ('R', 'C', 'W')
+                self.writer.warning(msg, path=path, line=line)
+
+        def display_results(self, layout):
+            pass
+except ImportError, e:
+    warn("unable to import pylint. Pylint checker disabled : %s" % e)
+
+
+class PyCoverageChecker(BaseChecker):
+    """check the tests coverage of a python package
+
+    if used, it must be after the pyunit checker
+
+    When devtools is available, test will be launched in a coverage mode. This
+    test will gather coverage information, and will succeed if the test coverage
+    is superior to a given threshold. *This checker must be executed after the
+    python_unittest checker.*
+    """
+
+    id = 'pycoverage'
+    options_def = {
+        'coverage.threshold': {
+            'type': 'int', 'default': 80,
+            'help': ('integer between 1 and 100 telling expected percent coverage '
+                     'to pass this check. Default to 80.'),
+            },
+        'coverage_data': {
+            'required': True,
+            'help': ('path to the coverage data file gathered while running '
+                     'the python_unittest check.'),
+            },
+        }
+
+    def version_info(self):
+        self.record_version_info('devtools', devtools_version)
+
+    def do_check(self, test):
+        """run the checker against <path> (usually a directory)"""
+        self.threshold = self.options.get('coverage.threshold')
+        assert self.threshold, 'no threshold defined'
+        # XXX check the pyunit check is executed before (with coverage
+        # activated) ?
+        coverage_file = self.options.get('coverage_data')
+        if not exists(coverage_file):
+            self.writer.fatal('no coverage information', path=coverage_file)
+            return NODATA
+        directory, fname = split(coverage_file)
+        with pushd(directory):
+            percent = self._get_cover_info(fname, pyinstall_path(test))
+        if percent < self.threshold:
+            return FAILURE
+        return SUCCESS
+
+    def _get_cover_info(self, fname, inst_path):
+        covertool = coverage.Coverage()
+        covertool.cache_default = fname
+        covertool.restore()
+        stats = covertool.report_stat(inst_path, ignore_errors=1)
+        percent = stats[coverage.TOTAL_ENTRY][2]
+        self.writer.raw('coverage', '%.3f' % (percent, ), 'result')
+        result = []
+        for name in stats.keys():
+            if name == coverage.TOTAL_ENTRY:
+                continue
+            nb_stmts, nb_exec_stmts, pc, pc_missing, readable = stats[name]
+            if pc == 100:
+                continue
+            result.append( (pc_missing, name, pc, readable) )
+        result.sort()
+        for _, name, pc_cover, readable in result:
+            msg = '%d %% covered, missing %s' % (pc_cover, readable)
+            if pc_cover < ( self.threshold / 2):
+                self.writer.error(msg, path=name)
+            elif pc_cover < self.threshold:
+                self.writer.warning(msg, path=name)
+            else:
+                self.writer.info(msg, path=name)
+        return percent
+
+if coverage is not None:
+    register('checker', PyCoverageChecker)
+
+
+class PyCheckerOutputParser(SimpleOutputParser):
+    non_zero_status_code = FAILURE
+    def parse_line(self, line):
+        try:
+            path, line, msg = line.split(':')
+            self.writer.error(msg, path=path, line=line)
+            self.status = FAILURE
+        except ValueError:
+            self.unparsed.append(line)
+
+class PyCheckerChecker(BaseChecker):
+    """check that unit tests of a python package succeed
+
+    spawn unittest and parse output (expect a standard TextTestRunner)
+    """
+
+    id = 'pychecker'
+    need_preprocessor = 'install'
+
+    def do_check(self, test):
+        """run the checker against <path> (usually a directory)"""
+        command = ['pychecker', '-Qqe', 'Style']
+        command += get_module_files(pyinstall_path(test))
+        return ParsedCommand(self.writer, command, parsercls=PyCheckerOutputParser).run()
+
+    def version_info(self):
+        self.record_version_info('pychecker', getoutput("pychecker --version").strip())
+
+register('checker', PyCheckerChecker)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/preprocessors/__init__.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,16 @@
+"""preprocessors packages"""
+
+__docformat__ = "restructuredtext en"
+
+from apycotlib import ApycotObject
+
+class BasePreProcessor(ApycotObject):
+    """an abstract class providing some common utilities for preprocessors
+    """
+    __type__ = 'preprocessor'
+
+    def run(self, test, path):
+        """Run preprocessor against source in <path> in <test> context"""
+        raise NotImplementedError()
+
+from apycotlib.preprocessors import distutils
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/preprocessors/distutils.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,78 @@
+"""installation preprocessor using distutils setup.py
+
+:organization: Logilab
+:copyright: 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: General Public License version 2 - http://www.gnu.org/licenses
+"""
+from __future__ import with_statement
+
+__docformat__ = "restructuredtext en"
+
+import os
+import shutil
+from os.path import join, exists, abspath
+
+from logilab.common import optik_ext as opt
+from logilab.common.shellutils import pushd
+
+from apycotlib import register, SetupException
+from apycotlib import Command
+from apycotlib.preprocessors import BasePreProcessor
+
+
+class DistutilsProcessor(BasePreProcessor):
+    """python setup.py pre-processor
+
+       Use a distutils'setup.py script to install a Python package. The
+       setup.py should provide an "install" function which run the setup and
+       return a "dist" object (i.e. the object return by the distutils.setup
+       function). This preprocessor may modify the PATH and PYTHONPATH
+       environment variables.
+    """
+    id = 'python_setup'
+    _python_path_set = None
+    _installed = set()
+
+    options_def = {
+        'verbose': {
+            'type': 'int', 'default': False,
+            'help': 'set verbose mode'
+            },
+        }
+
+    # PreProcessor interface ##################################################
+
+    def run(self, test, path=None):
+        """run the distutils setup.py install method on a path if
+        the path is not yet installed
+        """
+        if path is None:
+            path = test.project_path()
+        if not DistutilsProcessor._python_path_set:
+            path = test.project_path() # XXX overrides any explicit 'path' argument
+            py_lib_dir = join(os.getcwd(), 'local', 'lib', 'python')
+            # setuptools need this directory to exists
+            if not exists(py_lib_dir):
+                os.makedirs(py_lib_dir)
+            test.update_env(path, 'PYTHONPATH', py_lib_dir, os.pathsep)
+            test.update_env(path, 'PATH', join(os.getcwd(), 'bin'), os.pathsep)
+            DistutilsProcessor._python_path_set = py_lib_dir
+        # cache to avoid multiple installation of the same module
+        if path in self._installed:
+            return
+        if not exists(join(path, 'setup.py')):
+            raise SetupException('No file %s' % abspath(join(path, 'setup.py')))
+        self._installed.add(path)
+        cmd_args = ['python', 'setup.py', 'install', '--home',
+                    join(test.tmpdir, 'local')]
+        if not self.options.get('verbose'):
+            cmd_args.append('--quiet')
+        with pushd(path):
+            cmd = Command(self.writer, cmd_args, raises=True)
+            cmd.run()
+            if exists('build'):
+                shutil.rmtree('build') # remove the build directory
+
+
+register('preprocessor', DistutilsProcessor)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/repositories.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,185 @@
+"""Some standard sources repositories, + factory function
+
+:organization: Logilab
+:copyright: 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: General Public License version 2 - http://www.gnu.org/licenses
+"""
+__docformat__ = "restructuredtext en"
+
+from os import path as osp
+from time import localtime
+
+from logilab.common.textutils import split_url_or_path
+
+from apycotlib import register, get_registered, ConfigError
+
+
+SUPPORTED_REPO_TYPES = ('mercurial', 'subversion')
+
+
+def get_repository(attrs):
+    """factory method: return a repository implementation according to
+    <attrs> (a dictionary)
+    """
+    repo_type = attrs['repository'].type
+    assert repo_type in SUPPORTED_REPO_TYPES, repo_type
+    return get_registered('repository', repo_type)(attrs)
+
+class VersionedRepository:
+    """base class for versionned repository"""
+
+    id = None
+    default_branch = None
+
+    def __init__(self, attrs):
+        try:
+            self.repository = attrs.pop('repository')
+        except KeyError, ex:
+            raise ConfigError('Missing %s option: %s' % (ex, attrs))
+        if not self.repository:
+            raise ConfigError('Repository must be specified (%s)' % (attrs,))
+        self.path = attrs.pop('path', '')
+        branch = attrs.pop('branch', None)
+        if branch is None:
+            branch = self.default_branch
+        self.branch = branch
+        self.ref_repo = self._ref_repo()
+        assert self.ref_repo
+        # relative path where the project will be located in the test
+        # environment
+        self.co_path = self._co_path()
+
+    def __eq__(self, other):
+        return (isinstance(other, self.__class__) and
+                self.repository == other.repository and
+                self.path == other.path and
+                self.branch == other.branch)
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __repr__(self):
+        """get a string synthetizing the location"""
+        myrepr = '%s:%s' % (self.id, self.repository.source_url or self.repository.path)
+        if self.path:
+            myrepr += '/' + self.path
+        if self.branch:
+            myrepr += '@%s' % self.branch
+        return myrepr
+
+    def co_command(self, quiet=True):
+        """return a command that may be given to os.system to check out a given
+        package
+        """
+        raise NotImplementedError()
+
+    def co_move_to_branch_command(self, quiet=True):
+        return None
+
+    def normalize_date(self, from_date, to_date):
+        """get dates as float or local time and return the normalized dates as
+        local time tuple to fetch log information from <from_date> to <to_date>
+        included
+        """
+        if isinstance(from_date, float):
+            from_date = localtime(from_date)
+        if isinstance(to_date, float):
+            to_date = localtime(to_date)
+        return (from_date, to_date)
+
+    def revision(self):
+        """return revision of the working directory"""
+        return None
+
+
+def _get_latest_part(string):
+    parts = string.rstrip('/').rsplit('/', 1)
+    if len(parts) > 1:
+        return parts[1]
+    return parts[0]
+
+
+class SVNRepository(VersionedRepository):
+    """extract sources/information for a project from a SVN repository"""
+    id = 'subversion'
+
+    def _ref_repo(self):
+        return self.repository.source_url
+
+    def _co_path(self):
+        """return the relative path where the project will be located
+        in the test environment
+        """
+        # only url components
+        if self.branch:
+            return _get_latest_part(self.branch)
+        if self.path:
+            return _get_latest_part(self.path)
+        return self.ref_repo.rstrip('/').rsplit('/', 1)[1]
+
+    def co_command(self, quiet=True):
+        """return a command that may be given to os.system to check out a given
+        package
+        """
+        if quiet:
+            quiet ='-q'
+        else:
+            quiet = ''
+        repository = self.ref_repo
+        if self.branch:
+            repository += '/%s' % self.branch
+        if self.path:
+            repository += '/%s' % self.path
+        return 'svn checkout --non-interactive %s %s' % (quiet, repository)
+
+register('repository', SVNRepository)
+
+
+class HGRepository(VersionedRepository):
+    """extract sources/information for a project from a Mercurial repository"""
+    id = 'mercurial'
+    default_branch = "default"
+
+    def _ref_repo(self):
+        return (self.repository.local_cache
+                or self.repository.path
+                or self.repository.source_url)
+
+    def _co_path(self):
+        """return the relative path where the project will be located in the
+        test environment
+        """
+        copath = split_url_or_path(self.ref_repo)[1]
+        if self.path:
+            return osp.join(copath, self.path)
+        return copath
+
+    def co_command(self, quiet=True):
+        """return a command that may be given to os.system to check out a given
+        package
+        """
+        if quiet:
+            return 'hg clone -q %s' % self.ref_repo
+        return 'hg clone %s' % self.ref_repo
+
+    def co_move_to_branch_command(self, quiet=True):
+        # if branch doesn't exists, stay in default
+        if self.branch:
+            return 'hg -R %s up %s' % (self.co_path, self.branch)
+        return None
+
+    def revision(self):
+        from logilab.common.hg import get_repository, short
+        repo = get_repository(self.co_path)
+        try: # hg < 1.0 (?)
+            ctx = repo.workingctx()
+        except AttributeError:
+            # hg > 1.0
+            ctx = repo[None]
+        parents = ctx.parents()
+        # XXX assert len(parents) == 1 (no merge in progress)?
+        return short(parents[0].node())
+
+register('repository', HGRepository)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/_apycotlib/writer.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,244 @@
+"""Writer sending data to a cubicweb instance which store it and may be used
+to display reports
+
+:organization: Logilab
+:copyright: 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+"""
+from __future__ import with_statement
+__docformat__ = "restructuredtext en"
+
+import os
+import logging
+import tarfile
+import tempfile
+import traceback
+from datetime import datetime
+from StringIO import StringIO
+from threading import RLock
+
+from logilab.mtconverter import xml_escape
+
+from cubicweb import Binary
+
+REVERSE_SEVERITIES = {
+    logging.DEBUG :   u'DEBUG',
+    logging.INFO :    u'INFO',
+    logging.WARNING : u'WARNING',
+    logging.ERROR :   u'ERROR',
+    logging.FATAL :   u'FATAL'
+    }
+
+ARCHIVE_EXT = '.tar.bz2'
+ARCHIVE_MODE = 'w:bz2'
+ARCHIVE_NAME = "apycot-archive-%(instance-id)s-%(exec-id)s"+ARCHIVE_EXT
+
+def make_archive_name(cwinstid, execution_id):
+    # replace ':' as tar use them to fetch archive over network
+    exec_data = {'exec-id':     execution_id,
+                 'instance-id': cwinstid,
+                }
+    return (ARCHIVE_NAME % exec_data).replace(':', '.')
+
+
+
+class AbstractLogWriter(object):
+
+    def _unicode(self, something):
+        if isinstance(something, str):
+            return unicode(something, 'utf-8', 'replace')
+        if not isinstance(something, unicode):
+            return unicode(something)
+        return something
+
+    def debug(self, *args, **kwargs):
+        """log an debug"""
+        self.log(logging.DEBUG, *args, **kwargs)
+
+    def info(self, *args, **kwargs):
+        """log an info"""
+        self.log(logging.INFO, *args, **kwargs)
+
+    def warning(self, *args, **kwargs):
+        """log a warning"""
+        self.log(logging.WARNING, *args, **kwargs)
+
+    def error(self, *args, **kwargs):
+        """log an error"""
+        self.log(logging.ERROR, *args, **kwargs)
+
+    def fatal(self, *args, **kwargs):
+        """log a fatal error"""
+        self.log(logging.FATAL, *args, **kwargs)
+
+    critical = fatal
+
+    def _msg_info(self, *args, **kwargs):
+        path = kwargs.pop('path', None)
+        line = kwargs.pop('line', None)
+        tb = kwargs.pop('tb', False)
+        assert not kwargs
+        if len(args) > 1:
+            args = [self._unicode(string) for string in args]
+            msg = args[0] % tuple(args[1:])
+        else:
+            assert args
+            msg = self._unicode(args[0])
+        if tb:
+            stream = StringIO()
+            traceback.print_exc(file=stream)
+            msg += '\n' + stream.getvalue()
+        return path, line, msg
+
+    def log(self, severity, *args, **kwargs):
+        """log a message of a given severity"""
+        path, line, msg = self._msg_info(*args, **kwargs)
+        self._log(severity, path, line, msg)
+
+    def _log(self, severity, path, line, msg):
+        raise NotImplementedError()
+
+
+class BaseDataWriter(AbstractLogWriter):
+    """print execution message on stderr and store Test execution data to
+    a CubicWeb instance (using the apycot cube)
+    """
+
+    def __init__(self, cnxh, target_eid):
+        self._cnxh = cnxh
+        # eid of the execution entity
+        self._eid = target_eid
+        self._logs = []
+        self._logs_sent = 0
+        self._lock = RLock()
+
+    def start(self):
+        pass
+
+    def end(self):
+        pass
+
+    def set_exec_status(self, status):
+        with self._lock:
+            self._cnxh.execute(
+                'SET X status %(status)s WHERE X eid %(x)s',
+                {'status': status, 'x': self._eid})
+            self._cnxh.commit()
+
+    def execution_info(self, *args, **kwargs):
+        msg = self._msg_info(*args, **kwargs)[-1]
+        if isinstance(msg, unicode):
+            msg = msg.encode('utf-8')
+        print msg
+
+    def _log(self, severity, path, line, msg):
+        encodedmsg = u'%s\t%s\t%s\t%s<br/>' % (severity, path or u'',
+                                               line or u'', xml_escape(msg))
+        self._logs.append(encodedmsg)
+
+    def raw(self, name, value, type=None, commit=True):
+        """give some raw data"""
+        with self._lock:
+            self._cnxh.cw.create_entity(
+                'CheckResultInfo', label=self._unicode(name),
+                value=self._unicode(value), type=type and unicode(type),
+                for_check=self._cnxh.cw.entity_from_eid(self._eid))
+            if commit:
+                self._cnxh.commit()
+
+    def refresh_log(self, flush=True):
+        log = self._logs
+        with self._lock:
+            if self._logs_sent < len(log):
+                self._cnxh.execute(
+                    'SET X log %(log)s WHERE X eid %(x)s',
+                    {'log': u'\n'.join(log), 'x': self._eid})
+                self._logs_sent = len(log)
+            if flush:
+                self._cnxh.commit()
+
+
+class CheckDataWriter(BaseDataWriter):
+    """Writer intended to report Check level log and result."""
+
+    def start(self, checker):
+        """Register the given checker as started"""
+        with self._lock:
+            self._eid = self._cnxh.cw.create_entity(
+                'CheckResult', name=self._unicode(checker.id), status=u'processing',
+                starttime=datetime.now(),
+                during_execution=self._cnxh.cw.entity_from_eid(self._eid)).eid
+            options = ['%s=%s' % (k, v) for k, v in checker.options.iteritems()
+                       if k in checker.options_def
+                       and v != checker.options_def[k].get('default')]
+            if options:
+                self.info('\n'.join(options))
+                self.refresh_log(flush=False)
+            self._cnxh.commit()
+
+    def end(self, status):
+        """Register the given checker as closed with status <status>"""
+        with self._lock:
+            """end of the latest started check"""
+            self._cnxh.execute(
+                'SET X status %(status)s, X endtime %(endtime)s, X log %(log)s '
+                'WHERE X eid %(x)s',
+                {'status': self._unicode(status), 'endtime': datetime.now(),
+                 'log': u'\n'.join(self._logs), 'x': self._eid})
+            self._cnxh.commit()
+
+
+class TestDataWriter(BaseDataWriter):
+    """Writer intended to report Test level log and result."""
+
+    def make_check_writer(self):
+        """Return a CheckDataWriter suitable to write checker log and result within this test"""
+        self.refresh_log()
+        return CheckDataWriter(self._cnxh, self._eid)
+
+    def link_to_revision(self, environment, vcsrepo):
+        revision = vcsrepo.revision()
+        if revision:
+            if not self._cnxh.execute(
+                'SET X using_revision REV '
+                'WHERE X eid %(x)s, REV changeset %(cs)s, '
+                'REV from_repository R, R eid %(r)s',
+                {'x': self._eid, 'cs': revision,
+                 'r': environment.repository.eid}):
+                self.raw(repr(vcsrepo), revision, 'revision')
+
+    def start(self):
+        self.set_exec_status(u'set up')
+
+    def end(self, status, archivedir=None):
+        """mark the current test as closed (with status <status>) and archive if requested."""
+        with self._lock:
+            """end of the test execution"""
+            if self._logs_sent < len(self._logs):
+                self._cnxh.execute('SET X status %(status)s, X log %(log)s WHERE X eid %(x)s',
+                                   {'log': u'\n'.join(self._logs),
+                                    'status': self._unicode(status),
+                                    'x': self._eid})
+            else:
+                self._cnxh.execute('SET X status %(status)s WHERE X eid %(x)s',
+                                   {'status': self._unicode(status),
+                                    'x': self._eid})
+            self._cnxh.commit()
+            if archivedir:
+                archive = make_archive_name(self._cnxh.cwinstid, self._eid)
+                archivefpath = os.path.join(tempfile.gettempdir(), archive)
+                tarball = tarfile.open(archivefpath, ARCHIVE_MODE)
+                try:
+                    tarball.add(archivedir)
+                    tarball.close()
+                    self._cnxh.cw.create_entity(
+                        'File', data=Binary(open(archivefpath, 'rb').read()),
+                        data_format=u'application/x-bzip2',
+                        data_name=unicode(archive),
+                        reverse_log_file=self._cnxh.cw.entity_from_eid(self._eid))
+                except:
+                    self.error('while archiving execution directory', tb=True)
+                finally:
+                    os.unlink(archivefpath)
+                self._cnxh.commit()
+
--- a/data/cubes.apycot.css	Wed Jul 28 12:10:03 2010 +0200
+++ b/data/cubes.apycot.css	Fri Sep 10 14:14:42 2010 +0200
@@ -1,6 +1,6 @@
 /* sample css file for APyCoT reports
  *
- * Copyright (c) 2003-2009 LOGILAB S.A. (Paris, FRANCE).
+ * Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
  * http://www.logilab.fr/ -- mailto:contact@logilab.fr
  */
 
@@ -21,6 +21,14 @@
            font-weight: bold;
           }
 
+table.projectEnvConfiguration th {
+     padding-left : 0.3em;
+}
+
+table.projectEnvConfiguration td {
+     padding-left : 0.5em;
+}
+
 table.apycotreport th {
      background-color : #DDDDDD;
      text-align : left;
--- a/debian/changelog	Wed Jul 28 12:10:03 2010 +0200
+++ b/debian/changelog	Fri Sep 10 14:14:42 2010 +0200
@@ -1,3 +1,9 @@
+apycot (2.0.0-1) unstable; urgency=low
+
+  * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr>  Tue, 07 Sep 2010 13:23:05 +0200
+
 cubicweb-apycot (1.10.0-1) unstable; urgency=low
 
   * new upstream release
--- a/debian/control	Wed Jul 28 12:10:03 2010 +0200
+++ b/debian/control	Fri Sep 10 14:14:42 2010 +0200
@@ -1,4 +1,4 @@
-Source: cubicweb-apycot
+Source: apycot
 Section: web
 Priority: optional
 Maintainer: Logilab Packaging Team <contact@logilab.fr>
@@ -17,3 +17,10 @@
  and provides multi-criteria reports.
  .
  CubicWeb is a semantic web application framework, see http://www.cubicweb.org
+
+Package: narval-apycot
+Architecture: all
+XB-Python-Version: ${python:Versions}
+Depends: ${python:Depends}, cubicweb-apycot (= ${source:Version}), narval-bot
+Description: apycot extensions for the narval agent
+ Provides a basic set of Narval actions for Continuous Integration.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/debian/narval-apycot.dirs	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,3 @@
+var/lib
+usr/lib
+usr/share
--- a/debian/rules	Wed Jul 28 12:10:03 2010 +0200
+++ b/debian/rules	Fri Sep 10 14:14:42 2010 +0200
@@ -24,7 +24,9 @@
 	dh_clean -k
 	dh_installdirs -i
 	NO_SETUPTOOLS=1 python setup.py -q install --no-compile --prefix=debian/cubicweb-apycot/usr/
-	rm -rf debian/cubicweb-apycot/usr/lib/python*
+	mv debian/cubicweb-apycot/usr/share/apycot debian/narval-apycot/usr/share
+	mv debian/cubicweb-apycot/usr/var/lib/narval debian/narval-apycot/var/lib/
+	mv debian/cubicweb-apycot/usr/lib/python* debian/narval-apycot/usr/lib/
 
 
 # Build architecture-independent files here.
--- a/entities.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/entities.py	Fri Sep 10 14:14:42 2010 +0200
@@ -8,48 +8,20 @@
 
 __docformat__ = "restructuredtext en"
 
+from itertools import chain
+
 from logilab.common.decorators import cached
-from logilab.common.textutils import TIME_UNITS, BYTE_UNITS, apply_units, splitstrip
+from logilab.common.textutils import text_to_dict
+from logilab.common.tasksqueue import LOW
 from logilab.mtconverter import xml_escape
 
+from cubicweb import ValidationError
 from cubicweb.entity import _marker
 from cubicweb.entities import AnyEntity, fetch_config
 
-
-def text_to_dict(text):
-    """parse multilines text containing simple 'key=value' lines and return a
-    dict of {'key': 'value'}. When the same key is encountered multiple time,
-    value is turned into a list containing all values.
-
-    >>> text_to_dict('''multiple=1
-    ... multiple= 2
-    ... single =3
-    ... ''')
-    {'single': '3', 'multiple': ['1', '2']}
+from cubes.narval.entities import Plan
 
-    """
-    res = {}
-    if not text:
-        return res
-    for line in text.splitlines():
-        line = line.strip()
-        if line:
-            key, value = [w.strip() for w in line.split('=', 1)]
-            if key in res:
-                try:
-                    res[key].append(value)
-                except AttributeError:
-                    res[key] = [res[key], value]
-            else:
-                res[key] = value
-    return res
-
-def vcsrepo_apycot_info(repo):
-    if repo.type == 'mercurial':
-        return 'hg', repo.path
-    return 'svn', repo.path
-
-class ExecutionRSSMixin(object):
+class ExecutionRSSMixin(object): # XXX move to an ui adapter
 
     RSS_LIMIT = 20
 
@@ -68,31 +40,85 @@
         raise NotImplementedError()
 
 
-class ProjectEnvironment(AnyEntity, ExecutionRSSMixin):
+class RefinementMixIn(object):
+
+    @property
+    def config_parent(self):
+        return self.refinement_of and self.refinement_of[0] or None
+
+    def config_parents(self, _done=None):
+        if _done is None:
+            _done = set()
+        _done.add(self.eid)
+        result = [self]
+        for parent in self.refinement_of:
+            if parent.eid in _done:
+                # XXX log error
+                continue
+            result += parent.config_parents(_done)
+        return result
+
+    def iter_refinements(self):
+        yield self
+        for refined in self.reverse_refinement_of:
+            for refined in refined.iter_refinements():
+                yield refined
+
+    def _regroup_dict(self, prop):
+        regroup = {}
+        for parent in reversed(self.config_parents()):
+            regroup.update(getattr(parent, prop))
+        return regroup
+
+    def owner_and_value(self, attr, isattr=False):
+        for parent in reversed(self.config_parents()):
+            value = getattr(parent, attr)
+            if isattr:
+                if value is not None:
+                    return parent, value
+            elif value:
+                return parent, value[0]
+        return None, None
+
+    def refined_attribute(self, attr):
+        for parent in reversed(self.config_parents()):
+            value = getattr(parent, attr)
+            if value is not None:
+                return value
+
+    def refined_relation(self, attr):
+        for parent in reversed(self.config_parents()):
+            value = getattr(parent, attr)
+            if value:
+                return value
+
+    # apycot bot helpers #######################################################
+
+    @property
+    def my_apycot_configuration(self):
+        return text_to_dict(self.check_config)
+
+    def apycot_configuration(self):
+        return self._regroup_dict('my_apycot_configuration')
+
+    @property
+    def my_apycot_process_environment(self):
+        return text_to_dict(self.check_environment)
+
+    def apycot_process_environment(self):
+        return self._regroup_dict('my_apycot_process_environment')
+
+
+# Project environment ##########################################################
+
+class ProjectEnvironment(RefinementMixIn, ExecutionRSSMixin, AnyEntity):
     __regid__ = 'ProjectEnvironment'
 
-    fetch_attrs, fetch_order = fetch_config(['name'])
+    fetch_attrs, fetch_order = fetch_config(['name', 'check_config', 'check_environment'])
 
-    def printable_value(self, attr, value=_marker, attrtype=None,
-                        format='text/html', displaytime=True):
-        """return a displayable value (i.e. unicode string) which may contains
-        html tags
-        """
-        attr = str(attr)
-        if value is _marker:
-            value = getattr(self, attr)
-        if value is None or value == '': # don't use "not", 0 is an acceptable value
-            return u''
-        if attr == 'vcs_path' and format == 'text/html':
-            if '://' in value:
-                return '<a href="%s">%s</a>' % (xml_escape(value),
-                                                xml_escape(value))
-            return xml_escape(value)
-        return super(ProjectEnvironment, self).printable_value(
-            attr, value, attrtype, format, displaytime)
     # rss related methods #####################################################
 
-
+    # XXX move methods below to an adapter
     def rss_description(self, vid='rss'):
         data = {
             'pe': self.dc_title(),
@@ -111,136 +137,70 @@
     # cube specific logic #####################################################
 
     @property
+    def project(self):
+        """tracker integration"""
+        if 'has_apycot_environment' in self._cw.vreg.schema:
+            projects = self.refined_relation('reverse_has_apycot_environment')
+            return projects and projects[0] or None
+
+    @property
     def repository(self):
-        return self.local_repository and self.local_repository[0] or None
+        repos = self.refined_relation('local_repository')
+        return repos and repos[0] or None
 
     def dependencies(self, _done=None):
         if _done is None:
             _done = set()
         _done.add(self.eid)
         result = []
-        for pe in self.needs_checkout:
-            if pe.eid in _done:
-                continue
-            result.append(pe)
-            result += pe.dependencies(_done)
+        if self.project:
+            # XXX include recommends?
+            for dp in chain(self.project.uses, self.project.recommends):
+                # use getattr since for instance ExternalProject has no apycot
+                # environment relation
+                for dpe in getattr(dp, 'has_apycot_environment', ()):
+                    if dpe.eid in _done:
+                        continue
+                    result.append(dpe)
+                    result += dpe.dependencies(_done)
         return result
 
-    # apycot bot helpers #######################################################
-
-    @property
-    def my_apycot_process_environment(self):
-        return text_to_dict(self.check_environment)
-
-    @property
-    def my_apycot_configuration(self):
-        return text_to_dict(self.check_config)
-
-    @property
-    def apycot_configuration(self):
-        return self.my_apycot_configuration
-
-    @property
-    def apycot_preprocessors(self):
-        return text_to_dict(self.check_preprocessors)
+    # XXX no reverse dependencies without tracker
+    def reverse_dependencies(self):
+        result = []
+        if self.project:
+            for dp in chain(self.project.reverse_uses, self.project.reverse_recommends):
+                for dpe in getattr(dp, 'has_apycot_environment', ()):
+                    result.append(dpe)
+        return result
 
-    @property
-    def apycot_repository_def(self):
-        if self.vcs_repository:
-            vcsrepo = self.vcs_repository
-            vcsrepotype = self.vcs_repository_type
-        elif self.repository:
-            vcsrepotype, vcsrepo = vcsrepo_apycot_info(self.repository)
-        else:
-            vcsrepo = vcsrepotype = None
-        repo_def = {
-            'repository_type': vcsrepotype,
-            'repository': vcsrepo,
-            'path': self.vcs_path
-            }
-        if 'branch' in self.apycot_configuration:
-            repo_def['branch'] = self.apycot_configuration['branch']
-        return repo_def
+    def all_configurations(self):
+        cfgs = {}
+        for parent in reversed(self.config_parents()):
+            for tc in parent.reverse_use_environment:
+                cfgs[tc.name] = (parent, tc)
+        return cfgs
 
-
-    # tracker integration ######################################################
-
-    @property
-    def project(self):
-        if 'has_apycot_environment' in self._cw.vreg.schema:
-            return self.reverse_has_apycot_environment[0]
+    def configuration_by_name(self, name, checkstatus=True):
+        for parent in reversed(self.config_parents()):
+            for tc in parent.reverse_use_environment:
+                if tc.name == name:
+                    if not checkstatus or \
+                           tc.cw_adapt_to('IWorkflowable').state == 'activated':
+                        return tc
+                    return
 
 
-class TestConfigGroup(AnyEntity):
-    __regid__ = 'TestConfigGroup'
-
-    fetch_attrs, fetch_order = fetch_config(['name', 'checks'])
-
-    def config_parts(self, _done=None):
-        if _done is None:
-            _done = set()
-        _done.add(self.eid)
-        result = [self]
-        for group in self.use_group:
-            if group.eid in _done:
-                continue
-            result += group.config_parts(_done)
-        return result
-    config_parts = cached(config_parts, keyarg=0)
-
-    @property
-    def all_checks(self):
-        try:
-            return self.all_checks_and_owner()[1]
-        except TypeError:
-            return None
-
-    def all_checks_and_owner(self):
-        for group in self.config_parts():
-            if group.checks:
-                return group, splitstrip(group.checks)
+# Test configuration ###########################################################
 
-    def _regroup_dict(self, prop, regroup=None):
-        if regroup is None:
-            regroup = {}
-        for group in reversed(self.config_parts()):
-            regroup.update(getattr(group, prop))
-        return regroup
-
-    # apycot bot helpers #######################################################
-
-    @property
-    def my_apycot_process_environment(self):
-        return text_to_dict(self.check_environment)
-
-    @property
-    def apycot_process_environment(self):
-        return self._regroup_dict('my_apycot_process_environment')
-
-    @property
-    def my_apycot_configuration(self):
-        return text_to_dict(self.check_config)
-
-    @property
-    def apycot_configuration(self):
-        return self._regroup_dict('my_apycot_configuration')
-
-    # XXX for 1.4 migration
-    @property
-    def apycot_preprocessors(self):
-        return text_to_dict(self.check_preprocessors)
-
-
-class TestConfig(TestConfigGroup, ExecutionRSSMixin):
+class TestConfig(RefinementMixIn, ExecutionRSSMixin, AnyEntity):
     __regid__ = 'TestConfig'
 
-    def dc_title(self):
-        return '%s / %s' % (self.environment.name, self.name)
+    fetch_attrs, fetch_order = fetch_config(['name', 'label', 'check_config',
+                                             'check_environment'])
 
-    def rest_path(self, use_ext_eid=False):
-        return u'%s/%s' % (self.environment.rest_path(),
-                           self._cw.url_quote(self.name))
-
+    def dc_title(self):
+        return self.label or self.name
 
     # rss related methods #####################################################
 
@@ -262,111 +222,123 @@
     # cube specific logic #####################################################
 
     @property
-    def environment(self):
-        return self.use_environment[0]
+    def recipe(self):
+        recipes = self.refined_relation('use_recipe')
+        return recipes and recipes[0] or None
+
+    @property
+    def start_reverse_dependencies(self):
+        return self.refined_attribute('start_rev_deps')
+
+    def iter_environments(self):
+        for penv in self.use_environment:
+            yield penv
+            for penv_ in penv.iter_refinements():
+                if penv is penv_:
+                    continue
+                if penv_.configuration_by_name(self.name) is self:
+                    yield penv_
 
-    def dependencies(self):
+    def apycot_configuration(self, environment=None):
+        config = super(TestConfig, self).apycot_configuration()
+        if environment is not None:
+            config.update(environment.apycot_configuration())
+        return config
+
+    def environment_dependencies_rset(self, environment):
+        return self._cw.execute(
+            'Any DPE WHERE TC eid %(tc)s, X for_testconfig TC, '
+            'PE eid %(pe)s, X for_environment PE, X on_environment DPE',
+            {'tc': self.eid, 'pe': environment.eid})
+
+    def dependencies(self, environment):
         _done = set()
-        result = self.environment.dependencies(_done)
-        for dpe in self.needs_checkout:
+        result = environment.dependencies(_done)
+        for dpe in self.environment_dependencies_rset(environment).entities():
             if dpe.eid in _done:
                 continue
             result.append(dpe)
             result += dpe.dependencies(_done)
         return result
 
-    @cached
-    def all_check_results(self):
-        rset = self._cw.execute('Any MAX(X), XN GROUPBY XN, EXB ORDERBY XN '
-                                'WHERE X is CheckResult, X name XN, '
-                                'X during_execution EX, EX using_config C, '
-                                'EX branch EXB, C eid %(c)s',
-                                {'c': self.eid})
-        return list(rset.entities())
-
-    def latest_execution(self):
-        rset = self._cw.execute('Any X, C ORDERBY X DESC LIMIT 1'
-                                'WHERE X is TestExecution, X using_config C, '
-                                'C eid %(c)s', {'c': self.eid})
-        if rset:
-            return rset.get_entity(0, 0)
-
-    def latest_full_execution(self):
-        rset = self._cw.execute('Any X, C, COUNT(CR) GROUPBY X, C '
-                                'ORDERBY 3 DESC, X DESC LIMIT 1'
-                                'WHERE X is TestExecution, X using_config C, '
-                                'C eid %(c)s, CR during_execution X',
-                                {'c': self.eid})
-        if rset:
-            return rset.get_entity(0, 0)
-
-    def latest_check_result_by_name(self, name, branch):
-        for cr in self.all_check_results():
-            if cr.name == name and cr.execution.branch == branch:
-                return cr
-
-    def match_branch(self, branch):
-        return self.apycot_configuration.get('branch', branch) == branch
-
-    # apycot bot helpers #######################################################
+    def match_branch(self, pe, branch):
+        return self.apycot_configuration(pe).get('branch', branch) == branch
 
-    def _regroup_dict(self, prop, with_pe=True):
-        if with_pe:
-            regroup = getattr(self.environment, prop).copy()
+    def start(self, pe, branch=None, start_rev_deps=None, priority=LOW,
+              archive=False, check_duplicate=True):
+        if self.recipe is None:
+            raise ValidationError(self.eid, {None: 'configuration has no recipe'})
+        # don't overwrite branch hardcoded on the environment
+        pecfg = pe.apycot_configuration()
+        if pecfg.get('branch'):
+            branch = pecfg['branch']
+        elif branch is None: # XXX shouldn't occurs?
+            branch = self.apycot_configuration().get('branch')
+        if branch is None:
+            branch = pe.repository.default_branch()
+        cfg = self.apycot_configuration(pe)
+        if cfg:
+            arguments = u'Options(%s)' % cfg
         else:
-            regroup = {}
-        return super(TestConfig, self)._regroup_dict(prop, regroup)
-
-    @property
-    def apycot_process_environment(self):
-        return self._regroup_dict('my_apycot_process_environment')
-
-    @property
-    @cached
-    def apycot_configuration(self):
-        return self._apycot_configuration()
-
-    @property
-    @cached
-    def apycot_tc_configuration(self):
-        return self._apycot_configuration(with_pe=False)
-
-    def _apycot_configuration(self, with_pe=True):
-        config = self._regroup_dict('my_apycot_configuration', with_pe=with_pe)
-        for option in (u'max-cpu-time', u'max-reprieve', u'max-time'):
-            if option in config:
-                config[option] = apply_units(config[option], TIME_UNITS)
-        if u'max-memory' in config:
-            config[u'max-memory'] = apply_units(config[u'max-memory'],
-                                                BYTE_UNITS)
-        return config
-
-    # tracker integration ######################################################
-
-    @property
-    def project(self):
-        """tracker integration"""
-        try:
-            return self.entity.reverse_has_apycot_environment[0]
-        except (AttributeError, IndexError):
-            return None
+            arguments = None
+        duplicate_rset = check_duplicate and self._cw.execute(
+            "Any X WHERE X branch %(branch)s, X status 'waiting execution', "
+            "X using_environment PE, PE eid %(pe)s, "
+            "X using_config TC, TC eid %(tc)s",
+            {'branch': branch, 'pe': pe.eid, 'tc': self.eid})
+        if duplicate_rset:
+            assert len(duplicate_rset) == 1
+            texec = duplicate_rset.get_entity(0,0)
+            # if priority > duplicate.priority:
+            #     duplicate.set_attributes(priority=priority)
+            # if archive:
+            #     for option_line in duplicate.options.splitlines():
+            #         if option_line:
+            #             option, value = option_line.split('=')
+            #             if option == 'archive':
+            #                 dup_arch = bool(value)
+            #                 if not dup_arch:
+            #                     duplicate.set_attributes(archive=False)
+        else:
+            options = u'archive=%s' % archive
+            texec = self._cw.create_entity(
+                'TestExecution', priority=priority, arguments=arguments,
+                options=options, execution_of=self.recipe,
+                branch=branch, using_environment=pe, using_config=self)
+        if start_rev_deps or (start_rev_deps is None and self.start_reverse_dependencies):
+            for dpe in pe.reverse_dependencies():
+                tc = dpe.configuration_by_name(self.name)
+                if tc is not None:
+                    tc.start(dpe, branch=branch, start_rev_deps=False,
+                             priority=priority)
+        return texec
 
 
-class TestExecution(AnyEntity, ExecutionRSSMixin):
+class TestExecution(Plan, ExecutionRSSMixin):
     __regid__ = 'TestExecution'
 
-    def rest_path(self, use_ext_eid=False):
-        return u'%s/%s' % (self.configuration.rest_path(),
-                           self.eid)
-
     def dc_title(self):
-        return self._cw._('Execution of %(config)s on %(date)s') % {
-            'config': self.configuration.dc_title(),
-            'date': self.printable_value('starttime')}
+        if self.starttime:
+            return self._cw._('Execution of %(pe)s/%(config)s on %(date)s') % {
+                'config': self.configuration.dc_title(),
+                'pe': self.environment.dc_title(),
+                'date': self.printable_value('starttime')}
+        else:
+            return self._cw._('Execution of %(pe)s/%(config)s') % {
+                'config': self.configuration.dc_title(),
+                'pe': self.environment.dc_title()}
 
     def dc_date(self, date_format=None):
         return self._cw.format_date(self.starttime, date_format=date_format)
 
+    def rest_path(self, use_ext_eid=False):
+        try:
+            return u'%s/%s/%s' % (self.environment.rest_path(),
+                                  self.configuration.name,
+                                  self.eid)
+        except IndexError:
+            return u'%s' % self.eid
+
     # rss related methods #####################################################
 
     def rss_description(self, vid='rss'):
@@ -392,13 +364,22 @@
     # cube specific logic #####################################################
 
     @property
-    def checkers(self):
-        return self.reverse_during_execution
+    def project(self):
+        """tracker integration"""
+        return self.environment.project
 
     @property
     def configuration(self):
         return self.using_config[0]
 
+    @property
+    def environment(self):
+        return self.using_environment[0]
+
+    @property
+    def checkers(self):
+        return self.reverse_during_execution
+
     def check_result_by_name(self, name):
         for cr in self.reverse_during_execution:
             if cr.name == name:
@@ -439,12 +420,6 @@
             if rev.repository.eid == repository.eid:
                 return rev
 
-    # tracker integration ######################################################
-
-    @property
-    def project(self):
-        return self.configuration.project
-
 
 class CheckResult(AnyEntity):
     __regid__ = 'CheckResult'
@@ -452,11 +427,9 @@
                                              'name', 'status'])
 
     def absolute_url(self, *args, **kwargs):
-        kwargs['tab'] = self.name
+        kwargs.setdefault('tab', self.name)
         return self.execution.absolute_url(*args, **kwargs)
 
-    # cube specific logic #####################################################
-
     @property
     def execution(self):
         return self.during_execution[0]
@@ -471,10 +444,17 @@
         return self.for_check[0]
 
 
-from logilab.common.pyro_ext import ns_get_proxy
+class TestDependency(AnyEntity):
+    __regid__ = 'TestDependency'
+
+    @property
+    def configuration(self):
+        return self.for_testconfig[0]
 
-def bot_proxy(config, cache):
-    if not 'botproxy' in cache:
-        cache['botproxy'] = ns_get_proxy(config['bot-pyro-id'], 'apycot',
-                                         nshost=config['bot-pyro-ns'])
-    return cache['botproxy']
+    @property
+    def from_environment(self):
+        return self.for_environment[0]
+
+    @property
+    def to_environment(self):
+        return self.on_environment[0]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ext/jslint.js	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,536 @@
+// (C)2002 Douglas Crockford
+// www.JSLint.com
+// Rhino Edition
+"use strict";var JSLINT=(function(){var adsafe_id,adsafe_may,adsafe_went,anonname,approved,atrule={media:true,'font-face':true,page:true},bang={'<':true,'<=':true,'==':true,'===':true,'!==':true,'!=':true,'>':true,'>=':true,'+':true,'-':true,'*':true,'/':true,'%':true},banned={'arguments':true,callee:true,caller:true,constructor:true,'eval':true,prototype:true,stack:true,unwatch:true,valueOf:true,watch:true},boolOptions={adsafe:true,bitwise:true,browser:true,cap:true,css:true,debug:true,devel:true,eqeqeq:true,es5:true,evil:true,forin:true,fragment:true,immed:true,laxbreak:true,newcap:true,nomen:true,on:true,onevar:true,passfail:true,plusplus:true,regexp:true,rhino:true,undef:true,safe:true,windows:true,strict:true,sub:true,white:true,widget:true},browser={addEventListener:false,blur:false,clearInterval:false,clearTimeout:false,close:false,closed:false,defaultStatus:false,document:false,event:false,focus:false,frames:false,getComputedStyle:false,history:false,Image:false,length:false,location:false,moveBy:false,moveTo:false,name:false,navigator:false,onbeforeunload:true,onblur:true,onerror:true,onfocus:true,onload:true,onresize:true,onunload:true,open:false,opener:false,Option:false,parent:false,print:false,removeEventListener:false,resizeBy:false,resizeTo:false,screen:false,scroll:false,scrollBy:false,scrollTo:false,setInterval:false,setTimeout:false,status:false,top:false,XMLHttpRequest:false},cssAttributeData,cssAny,cssColorData={"aliceblue":true,"antiquewhite":true,"aqua":true,"aquamarine":true,"azure":true,"beige":true,"bisque":true,"black":true,"blanchedalmond":true,"blue":true,"blueviolet":true,"brown":true,"burlywood":true,"cadetblue":true,"chartreuse":true,"chocolate":true,"coral":true,"cornflowerblue":true,"cornsilk":true,"crimson":true,"cyan":true,"darkblue":true,"darkcyan":true,"darkgoldenrod":true,"darkgray":true,"darkgreen":true,"darkkhaki":true,"darkmagenta":true,"darkolivegreen":true,"darkorange":true,"darkorchid":true,"darkred":true,"darksalmon":tru
e,"darkseagreen":true,"darkslateblue":true,"darkslategray":true,"darkturquoise":true,"darkviolet":true,"deeppink":true,"deepskyblue":true,"dimgray":true,"dodgerblue":true,"firebrick":true,"floralwhite":true,"forestgreen":true,"fuchsia":true,"gainsboro":true,"ghostwhite":true,"gold":true,"goldenrod":true,"gray":true,"green":true,"greenyellow":true,"honeydew":true,"hotpink":true,"indianred":true,"indigo":true,"ivory":true,"khaki":true,"lavender":true,"lavenderblush":true,"lawngreen":true,"lemonchiffon":true,"lightblue":true,"lightcoral":true,"lightcyan":true,"lightgoldenrodyellow":true,"lightgreen":true,"lightpink":true,"lightsalmon":true,"lightseagreen":true,"lightskyblue":true,"lightslategray":true,"lightsteelblue":true,"lightyellow":true,"lime":true,"limegreen":true,"linen":true,"magenta":true,"maroon":true,"mediumaquamarine":true,"mediumblue":true,"mediumorchid":true,"mediumpurple":true,"mediumseagreen":true,"mediumslateblue":true,"mediumspringgreen":true,"mediumturquoise":true,"mediumvioletred":true,"midnightblue":true,"mintcream":true,"mistyrose":true,"moccasin":true,"navajowhite":true,"navy":true,"oldlace":true,"olive":true,"olivedrab":true,"orange":true,"orangered":true,"orchid":true,"palegoldenrod":true,"palegreen":true,"paleturquoise":true,"palevioletred":true,"papayawhip":true,"peachpuff":true,"peru":true,"pink":true,"plum":true,"powderblue":true,"purple":true,"red":true,"rosybrown":true,"royalblue":true,"saddlebrown":true,"salmon":true,"sandybrown":true,"seagreen":true,"seashell":true,"sienna":true,"silver":true,"skyblue":true,"slateblue":true,"slategray":true,"snow":true,"springgreen":true,"steelblue":true,"tan":true,"teal":true,"thistle":true,"tomato":true,"turquoise":true,"violet":true,"wheat":true,"white":true,"whitesmoke":true,"yellow":true,"yellowgreen":true},cssBorderStyle,cssBreak,cssLengthData={'%':true,'cm':true,'em':true,'ex':true,'in':true,'mm':true,'pc':true,'pt':true,'px':true},cssOverflow,devel={alert:false,confirm:false,console:false,Debug:
false,opera:false,prompt:false},escapes={'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','"':'\\"','/':'\\/','\\':'\\\\'},funct,functionicity=['closure','exception','global','label','outer','unused','var'],functions,global,htmltag={a:{},abbr:{},acronym:{},address:{},applet:{},area:{empty:true,parent:' map '},article:{},aside:{},audio:{},b:{},base:{empty:true,parent:' head '},bdo:{},big:{},blockquote:{},body:{parent:' html noframes '},br:{empty:true},button:{},canvas:{parent:' body p div th td '},caption:{parent:' table '},center:{},cite:{},code:{},col:{empty:true,parent:' table colgroup '},colgroup:{parent:' table '},command:{parent:' menu '},datalist:{},dd:{parent:' dl '},del:{},details:{},dialog:{},dfn:{},dir:{},div:{},dl:{},dt:{parent:' dl '},em:{},embed:{},fieldset:{},figure:{},font:{},footer:{},form:{},frame:{empty:true,parent:' frameset '},frameset:{parent:' html frameset '},h1:{},h2:{},h3:{},h4:{},h5:{},h6:{},head:{parent:' html '},header:{},hgroup:{},html:{parent:'*'},hr:{empty:true},i:{},iframe:{},img:{empty:true},input:{empty:true},ins:{},kbd:{},keygen:{},label:{},legend:{parent:' details fieldset figure '},li:{parent:' dir menu ol ul '},link:{empty:true,parent:' head '},map:{},mark:{},menu:{},meta:{empty:true,parent:' head noframes noscript '},meter:{},nav:{},noframes:{parent:' html body '},noscript:{parent:' body head noframes '},object:{},ol:{},optgroup:{parent:' select '},option:{parent:' optgroup select '},output:{},p:{},param:{empty:true,parent:' applet object '},pre:{},progress:{},q:{},rp:{},rt:{},ruby:{},samp:{},script:{empty:true,parent:' body div frame head iframe p pre span '},section:{},select:{},small:{},span:{},source:{},strong:{},style:{parent:' head ',empty:true},sub:{},sup:{},table:{},tbody:{parent:' table '},td:{parent:' tr '},textarea:{},tfoot:{parent:' table '},th:{parent:' tr '},thead:{parent:' table '},time:{},title:{parent:' head '},tr:{parent:' table tbody thead tfoot 
'},tt:{},u:{},ul:{},'var':{},video:{}},ids,implied,inblock,indent,jsonmode,lines,lookahead,member,membersOnly,nexttoken,noreach,option,predefined,prereg,prevtoken,rhino={defineClass:false,deserialize:false,gc:false,help:false,load:false,loadClass:false,print:false,quit:false,readFile:false,readUrl:false,runCommand:false,seal:false,serialize:false,spawn:false,sync:false,toint32:false,version:false},scope,windows={ActiveXObject:false,CScript:false,Debug:false,Enumerator:false,System:false,VBArray:false,WScript:false},src,stack,standard={Array:false,Boolean:false,Date:false,decodeURI:false,decodeURIComponent:false,encodeURI:false,encodeURIComponent:false,Error:false,'eval':false,EvalError:false,Function:false,hasOwnProperty:false,isFinite:false,isNaN:false,JSON:false,Math:false,Number:false,Object:false,parseInt:false,parseFloat:false,RangeError:false,ReferenceError:false,RegExp:false,String:false,SyntaxError:false,TypeError:false,URIError:false},standard_member={E:true,LN2:true,LN10:true,LOG2E:true,LOG10E:true,PI:true,SQRT1_2:true,SQRT2:true,MAX_VALUE:true,MIN_VALUE:true,NEGATIVE_INFINITY:true,POSITIVE_INFINITY:true},strict_mode,syntax={},tab,token,urls,warnings,widget={alert:true,animator:true,appleScript:true,beep:true,bytesToUIString:true,Canvas:true,chooseColor:true,chooseFile:true,chooseFolder:true,closeWidget:true,COM:true,convertPathToHFS:true,convertPathToPlatform:true,CustomAnimation:true,escape:true,FadeAnimation:true,filesystem:true,Flash:true,focusWidget:true,form:true,FormField:true,Frame:true,HotKey:true,Image:true,include:true,isApplicationRunning:true,iTunes:true,konfabulatorVersion:true,log:true,md5:true,MenuItem:true,MoveAnimation:true,openURL:true,play:true,Point:true,popupMenu:true,preferenceGroups:true,preferences:true,print:true,prompt:true,random:true,Rectangle:true,reloadWidget:true,ResizeAnimation:true,resolvePath:true,resumeUpdates:true,RotateAnimation:true,runCommand:true,runCommandInBg:true,saveAs:true,savePreferences:true,screen:true,Scrol
lBar:true,showWidgetPreferences:true,sleep:true,speak:true,Style:true,suppressUpdates:true,system:true,tellWidget:true,Text:true,TextArea:true,Timer:true,unescape:true,updateNow:true,URL:true,Web:true,widget:true,Window:true,XMLDOM:true,XMLHttpRequest:true,yahooCheckLogin:true,yahooLogin:true,yahooLogout:true},xmode,xquote,ax=/@cc|<\/?|script|\]*s\]|<\s*!|&lt/i,cx=/[\u0000-\u001f\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/,tx=/^\s*([(){}\[.,:;'"~\?\]#@]|==?=?|\/(\*(jslint|members?|global)?|=|\/)?|\*[\/=]?|\+[+=]?|-[\-=]?|%=?|&[&=]?|\|[|=]?|>>?>?=?|<([\/=!]|\!(\[|--)?|<=?)?|\^=?|\!=?=?|[a-zA-Z_$][a-zA-Z0-9_$]*|[0-9]+([xX][0-9a-fA-F]+|\.[0-9]*)?([eE][+\-]?[0-9]+)?)/,hx=/^\s*(['"=>\/&#]|<(?:\/|\!(?:--)?)?|[a-zA-Z][a-zA-Z0-9_\-]*|[0-9]+|--)/,nx=/[\u0000-\u001f&<"\/\\\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/,nxg=/[\u0000-\u001f&<"\/\\\u007f-\u009f\u00ad\u0600-\u0604\u070f\u17b4\u17b5\u200c-\u200f\u2028-\u202f\u2060-\u206f\ufeff\ufff0-\uffff]/g,ox=/[>&]|<[\/!]?|--/,lx=/\*\/|\/\*/,ix=/^([a-zA-Z_$][a-zA-Z0-9_$]*)$/,jx=/^(?:javascript|jscript|ecmascript|vbscript|mocha|livescript)\s*:/i,ux=/&|\+|\u00AD|\.\.|\/\*|%[^;]|base64|url|expression|data|mailto/i,sx=/^\s*([{:#%.=,>+\[\]@()"';]|\*=?|\$=|\|=|\^=|~=|[a-zA-Z_][a-zA-Z0-9_\-]*|[0-9]+|<\/|\/\*)/,ssx=/^\s*([@#!"'};:\-%.=,+\[\]()*_]|[a-zA-Z][a-zA-Z0-9._\-]*|\/\*?|\d+(?:\.\d+)?|<\/)/,qx=/[^a-zA-Z0-9+\-_\/ ]/,dx=/[\[\]\/\\"'*<>.&:(){}+=#]/,rx={outer:hx,html:hx,style:sx,styleproperty:ssx};function F(){}
+if(typeof Object.create!=='function'){Object.create=function(o){F.prototype=o;return new F();};}
+function is_own(object,name){return Object.prototype.hasOwnProperty.call(object,name);}
+function combine(t,o){var n;for(n in o){if(is_own(o,n)){t[n]=o[n];}}}
+String.prototype.entityify=function(){return this.replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;');};String.prototype.isAlpha=function(){return(this>='a'&&this<='z\uffff')||(this>='A'&&this<='Z\uffff');};String.prototype.isDigit=function(){return(this>='0'&&this<='9');};String.prototype.supplant=function(o){return this.replace(/\{([^{}]*)\}/g,function(a,b){var r=o[b];return typeof r==='string'||typeof r==='number'?r:a;});};String.prototype.name=function(){if(ix.test(this)){return this;}
+if(nx.test(this)){return'"'+this.replace(nxg,function(a){var c=escapes[a];if(c){return c;}
+return'\\u'+('0000'+a.charCodeAt().toString(16)).slice(-4);})+'"';}
+return'"'+this+'"';};function assume(){if(!option.safe){if(option.rhino){combine(predefined,rhino);}
+if(option.devel){combine(predefined,devel);}
+if(option.browser){combine(predefined,browser);}
+if(option.windows){combine(predefined,windows);}
+if(option.widget){combine(predefined,widget);}}}
+function quit(m,l,ch){throw{name:'JSLintError',line:l,character:ch,message:m+" ("+Math.floor((l/lines.length)*100)+"% scanned)."};}
+function warning(m,t,a,b,c,d){var ch,l,w;t=t||nexttoken;if(t.id==='(end)'){t=token;}
+l=t.line||0;ch=t.from||0;w={id:'(error)',raw:m,evidence:lines[l-1]||'',line:l,character:ch,a:a,b:b,c:c,d:d};w.reason=m.supplant(w);JSLINT.errors.push(w);if(option.passfail){quit('Stopping. ',l,ch);}
+warnings+=1;if(warnings>=option.maxerr){quit("Too many errors.",l,ch);}
+return w;}
+function warningAt(m,l,ch,a,b,c,d){return warning(m,{line:l,from:ch},a,b,c,d);}
+function error(m,t,a,b,c,d){var w=warning(m,t,a,b,c,d);quit("Stopping, unable to continue.",w.line,w.character);}
+function errorAt(m,l,ch,a,b,c,d){return error(m,{line:l,from:ch},a,b,c,d);}
+var lex=(function lex(){var character,from,line,s;function nextLine(){var at;if(line>=lines.length){return false;}
+character=1;s=lines[line];line+=1;at=s.search(/ \t/);if(at>=0){warningAt("Mixed spaces and tabs.",line,at+1);}
+s=s.replace(/\t/g,tab);at=s.search(cx);if(at>=0){warningAt("Unsafe character.",line,at);}
+if(option.maxlen&&option.maxlen<s.length){warningAt("Line too long.",line,s.length);}
+return true;}
+function it(type,value){var i,t;if(type==='(color)'){t={type:type};}else if(type==='(punctuator)'||(type==='(identifier)'&&is_own(syntax,value))){t=syntax[value]||syntax['(error)'];}else{t=syntax[type];}
+t=Object.create(t);if(type==='(string)'||type==='(range)'){if(jx.test(value)){warningAt("Script URL.",line,from);}}
+if(type==='(identifier)'){t.identifier=true;if(value==='__iterator__'||value==='__proto__'){errorAt("Reserved name '{a}'.",line,from,value);}else if(option.nomen&&(value.charAt(0)==='_'||value.charAt(value.length-1)==='_')){warningAt("Unexpected {a} in '{b}'.",line,from,"dangling '_'",value);}}
+t.value=value;t.line=line;t.character=character;t.from=from;i=t.id;if(i!=='(endline)'){prereg=i&&(('(,=:[!&|?{};'.indexOf(i.charAt(i.length-1))>=0)||i==='return');}
+return t;}
+return{init:function(source){if(typeof source==='string'){lines=source.replace(/\r\n/g,'\n').replace(/\r/g,'\n').split('\n');}else{lines=source;}
+line=0;nextLine();from=1;},range:function(begin,end){var c,value='';from=character;if(s.charAt(0)!==begin){errorAt("Expected '{a}' and instead saw '{b}'.",line,character,begin,s.charAt(0));}
+for(;;){s=s.slice(1);character+=1;c=s.charAt(0);switch(c){case'':errorAt("Missing '{a}'.",line,character,c);break;case end:s=s.slice(1);character+=1;return it('(range)',value);case xquote:case'\\':warningAt("Unexpected '{a}'.",line,character,c);}
+value+=c;}},token:function(){var b,c,captures,d,depth,high,i,l,low,q,t;function match(x){var r=x.exec(s),r1;if(r){l=r[0].length;r1=r[1];c=r1.charAt(0);s=s.substr(l);from=character+l-r1.length;character+=l;return r1;}}
+function string(x){var c,j,r='';if(jsonmode&&x!=='"'){warningAt("Strings must use doublequote.",line,character);}
+if(xquote===x||(xmode==='scriptstring'&&!xquote)){return it('(punctuator)',x);}
+function esc(n){var i=parseInt(s.substr(j+1,n),16);j+=n;if(i>=32&&i<=126&&i!==34&&i!==92&&i!==39){warningAt("Unnecessary escapement.",line,character);}
+character+=n;c=String.fromCharCode(i);}
+j=0;for(;;){while(j>=s.length){j=0;if(xmode!=='html'||!nextLine()){errorAt("Unclosed string.",line,from);}}
+c=s.charAt(j);if(c===x){character+=1;s=s.substr(j+1);return it('(string)',r,x);}
+if(c<' '){if(c==='\n'||c==='\r'){break;}
+warningAt("Control character in string: {a}.",line,character+j,s.slice(0,j));}else if(c===xquote){warningAt("Bad HTML string",line,character+j);}else if(c==='<'){if(option.safe&&xmode==='html'){warningAt("ADsafe string violation.",line,character+j);}else if(s.charAt(j+1)==='/'&&(xmode||option.safe)){warningAt("Expected '<\\/' and instead saw '</'.",line,character);}else if(s.charAt(j+1)==='!'&&(xmode||option.safe)){warningAt("Unexpected '<!' in a string.",line,character);}}else if(c==='\\'){if(xmode==='html'){if(option.safe){warningAt("ADsafe string violation.",line,character+j);}}else if(xmode==='styleproperty'){j+=1;character+=1;c=s.charAt(j);if(c!==x){warningAt("Escapement in style string.",line,character+j);}}else{j+=1;character+=1;c=s.charAt(j);switch(c){case xquote:warningAt("Bad HTML string",line,character+j);break;case'\\':case'\'':case'"':case'/':break;case'b':c='\b';break;case'f':c='\f';break;case'n':c='\n';break;case'r':c='\r';break;case't':c='\t';break;case'u':esc(4);break;case'v':c='\v';break;case'x':if(jsonmode){warningAt("Avoid \\x-.",line,character);}
+esc(2);break;default:warningAt("Bad escapement.",line,character);}}}
+r+=c;character+=1;j+=1;}}
+for(;;){if(!s){return it(nextLine()?'(endline)':'(end)','');}
+while(xmode==='outer'){i=s.search(ox);if(i===0){break;}else if(i>0){character+=1;s=s.slice(i);break;}else{if(!nextLine()){return it('(end)','');}}}
+t=match(rx[xmode]||tx);if(!t){t='';c='';while(s&&s<'!'){s=s.substr(1);}
+if(s){if(xmode==='html'){return it('(error)',s.charAt(0));}else{errorAt("Unexpected '{a}'.",line,character,s.substr(0,1));}}}else{if(c.isAlpha()||c==='_'||c==='$'){return it('(identifier)',t);}
+if(c.isDigit()){if(xmode!=='style'&&!isFinite(Number(t))){warningAt("Bad number '{a}'.",line,character,t);}
+if(xmode!=='style'&&xmode!=='styleproperty'&&s.substr(0,1).isAlpha()){warningAt("Missing space after '{a}'.",line,character,t);}
+if(c==='0'){d=t.substr(1,1);if(d.isDigit()){if(token.id!=='.'&&xmode!=='styleproperty'){warningAt("Don't use extra leading zeros '{a}'.",line,character,t);}}else if(jsonmode&&(d==='x'||d==='X')){warningAt("Avoid 0x-. '{a}'.",line,character,t);}}
+if(t.substr(t.length-1)==='.'){warningAt("A trailing decimal point can be confused with a dot '{a}'.",line,character,t);}
+return it('(number)',t);}
+switch(t){case'"':case"'":return string(t);case'//':if(src||(xmode&&xmode!=='script')){warningAt("Unexpected comment.",line,character);}else if(xmode==='script'&&/<\s*\//i.test(s)){warningAt("Unexpected <\/ in comment.",line,character);}else if((option.safe||xmode==='script')&&ax.test(s)){warningAt("Dangerous comment.",line,character);}
+s='';token.comment=true;break;case'/*':if(src||(xmode&&xmode!=='script'&&xmode!=='style'&&xmode!=='styleproperty')){warningAt("Unexpected comment.",line,character);}
+if(option.safe&&ax.test(s)){warningAt("ADsafe comment violation.",line,character);}
+for(;;){i=s.search(lx);if(i>=0){break;}
+if(!nextLine()){errorAt("Unclosed comment.",line,character);}else{if(option.safe&&ax.test(s)){warningAt("ADsafe comment violation.",line,character);}}}
+character+=i+2;if(s.substr(i,1)==='/'){errorAt("Nested comment.",line,character);}
+s=s.substr(i+2);token.comment=true;break;case'/*members':case'/*member':case'/*jslint':case'/*global':case'*/':return{value:t,type:'special',line:line,character:character,from:from};case'':break;case'/':if(token.id==='/='){errorAt("A regular expression literal can be confused with '/='.",line,from);}
+if(prereg){depth=0;captures=0;l=0;for(;;){b=true;c=s.charAt(l);l+=1;switch(c){case'':errorAt("Unclosed regular expression.",line,from);return;case'/':if(depth>0){warningAt("Unescaped '{a}'.",line,from+l,'/');}
+c=s.substr(0,l-1);q={g:true,i:true,m:true};while(q[s.charAt(l)]===true){q[s.charAt(l)]=false;l+=1;}
+character+=l;s=s.substr(l);q=s.charAt(0);if(q==='/'||q==='*'){errorAt("Confusing regular expression.",line,from);}
+return it('(regexp)',c);case'\\':c=s.charAt(l);if(c<' '){warningAt("Unexpected control character in regular expression.",line,from+l);}else if(c==='<'){warningAt("Unexpected escaped character '{a}' in regular expression.",line,from+l,c);}
+l+=1;break;case'(':depth+=1;b=false;if(s.charAt(l)==='?'){l+=1;switch(s.charAt(l)){case':':case'=':case'!':l+=1;break;default:warningAt("Expected '{a}' and instead saw '{b}'.",line,from+l,':',s.charAt(l));}}else{captures+=1;}
+break;case'|':b=false;break;case')':if(depth===0){warningAt("Unescaped '{a}'.",line,from+l,')');}else{depth-=1;}
+break;case' ':q=1;while(s.charAt(l)===' '){l+=1;q+=1;}
+if(q>1){warningAt("Spaces are hard to count. Use {{a}}.",line,from+l,q);}
+break;case'[':c=s.charAt(l);if(c==='^'){l+=1;if(option.regexp){warningAt("Insecure '{a}'.",line,from+l,c);}}
+q=false;if(c===']'){warningAt("Empty class.",line,from+l-1);q=true;}
+klass:do{c=s.charAt(l);l+=1;switch(c){case'[':case'^':warningAt("Unescaped '{a}'.",line,from+l,c);q=true;break;case'-':if(q){q=false;}else{warningAt("Unescaped '{a}'.",line,from+l,'-');q=true;}
+break;case']':if(!q){warningAt("Unescaped '{a}'.",line,from+l-1,'-');}
+break klass;case'\\':c=s.charAt(l);if(c<' '){warningAt("Unexpected control character in regular expression.",line,from+l);}else if(c==='<'){warningAt("Unexpected escaped character '{a}' in regular expression.",line,from+l,c);}
+l+=1;q=true;break;case'/':warningAt("Unescaped '{a}'.",line,from+l-1,'/');q=true;break;case'<':if(xmode==='script'){c=s.charAt(l);if(c==='!'||c==='/'){warningAt("HTML confusion in regular expression '<{a}'.",line,from+l,c);}}
+q=true;break;default:q=true;}}while(c);break;case'.':if(option.regexp){warningAt("Insecure '{a}'.",line,from+l,c);}
+break;case']':case'?':case'{':case'}':case'+':case'*':warningAt("Unescaped '{a}'.",line,from+l,c);break;case'<':if(xmode==='script'){c=s.charAt(l);if(c==='!'||c==='/'){warningAt("HTML confusion in regular expression '<{a}'.",line,from+l,c);}}}
+if(b){switch(s.charAt(l)){case'?':case'+':case'*':l+=1;if(s.charAt(l)==='?'){l+=1;}
+break;case'{':l+=1;c=s.charAt(l);if(c<'0'||c>'9'){warningAt("Expected a number and instead saw '{a}'.",line,from+l,c);}
+l+=1;low=+c;for(;;){c=s.charAt(l);if(c<'0'||c>'9'){break;}
+l+=1;low=+c+(low*10);}
+high=low;if(c===','){l+=1;high=Infinity;c=s.charAt(l);if(c>='0'&&c<='9'){l+=1;high=+c;for(;;){c=s.charAt(l);if(c<'0'||c>'9'){break;}
+l+=1;high=+c+(high*10);}}}
+if(s.charAt(l)!=='}'){warningAt("Expected '{a}' and instead saw '{b}'.",line,from+l,'}',c);}else{l+=1;}
+if(s.charAt(l)==='?'){l+=1;}
+if(low>high){warningAt("'{a}' should not be greater than '{b}'.",line,from+l,low,high);}}}}
+c=s.substr(0,l-1);character+=l;s=s.substr(l);return it('(regexp)',c);}
+return it('(punctuator)',t);case'<!--':l=line;c=character;for(;;){i=s.indexOf('--');if(i>=0){break;}
+i=s.indexOf('<!');if(i>=0){errorAt("Nested HTML comment.",line,character+i);}
+if(!nextLine()){errorAt("Unclosed HTML comment.",l,c);}}
+l=s.indexOf('<!');if(l>=0&&l<i){errorAt("Nested HTML comment.",line,character+l);}
+character+=i;if(s[i+2]!=='>'){errorAt("Expected -->.",line,character);}
+character+=3;s=s.slice(i+3);break;case'#':if(xmode==='html'||xmode==='styleproperty'){for(;;){c=s.charAt(0);if((c<'0'||c>'9')&&(c<'a'||c>'f')&&(c<'A'||c>'F')){break;}
+character+=1;s=s.substr(1);t+=c;}
+if(t.length!==4&&t.length!==7){warningAt("Bad hex color '{a}'.",line,from+l,t);}
+return it('(color)',t);}
+return it('(punctuator)',t);default:if(xmode==='outer'&&c==='&'){character+=1;s=s.substr(1);for(;;){c=s.charAt(0);character+=1;s=s.substr(1);if(c===';'){break;}
+if(!((c>='0'&&c<='9')||(c>='a'&&c<='z')||c==='#')){errorAt("Bad entity",line,from+l,character);}}
+break;}
+return it('(punctuator)',t);}}}}};}());function addlabel(t,type){if(option.safe&&funct['(global)']&&typeof predefined[t]!=='boolean'){warning('ADsafe global: '+t+'.',token);}else if(t==='hasOwnProperty'){warning("'hasOwnProperty' is a really bad name.");}
+if(is_own(funct,t)&&!funct['(global)']){warning(funct[t]===true?"'{a}' was used before it was defined.":"'{a}' is already defined.",nexttoken,t);}
+funct[t]=type;if(funct['(global)']){global[t]=funct;if(is_own(implied,t)){warning("'{a}' was used before it was defined.",nexttoken,t);delete implied[t];}}else{scope[t]=funct;}}
+function doOption(){var b,obj,filter,o=nexttoken.value,t,v;switch(o){case'*/':error("Unbegun comment.");break;case'/*members':case'/*member':o='/*members';if(!membersOnly){membersOnly={};}
+obj=membersOnly;break;case'/*jslint':if(option.safe){warning("ADsafe restriction.");}
+obj=option;filter=boolOptions;break;case'/*global':if(option.safe){warning("ADsafe restriction.");}
+obj=predefined;break;default:}
+t=lex.token();loop:for(;;){for(;;){if(t.type==='special'&&t.value==='*/'){break loop;}
+if(t.id!=='(endline)'&&t.id!==','){break;}
+t=lex.token();}
+if(t.type!=='(string)'&&t.type!=='(identifier)'&&o!=='/*members'){error("Bad option.",t);}
+v=lex.token();if(v.id===':'){v=lex.token();if(obj===membersOnly){error("Expected '{a}' and instead saw '{b}'.",t,'*/',':');}
+if(t.value==='indent'&&o==='/*jslint'){b=+v.value;if(typeof b!=='number'||!isFinite(b)||b<=0||Math.floor(b)!==b){error("Expected a small integer and instead saw '{a}'.",v,v.value);}
+obj.white=true;obj.indent=b;}else if(t.value==='maxerr'&&o==='/*jslint'){b=+v.value;if(typeof b!=='number'||!isFinite(b)||b<=0||Math.floor(b)!==b){error("Expected a small integer and instead saw '{a}'.",v,v.value);}
+obj.maxerr=b;}else if(t.value==='maxlen'&&o==='/*jslint'){b=+v.value;if(typeof b!=='number'||!isFinite(b)||b<=0||Math.floor(b)!==b){error("Expected a small integer and instead saw '{a}'.",v,v.value);}
+obj.maxlen=b;}else if(v.value==='true'){obj[t.value]=true;}else if(v.value==='false'){obj[t.value]=false;}else{error("Bad option value.",v);}
+t=lex.token();}else{if(o==='/*jslint'){error("Missing option value.",t);}
+obj[t.value]=false;t=v;}}
+if(filter){assume();}}
+function peek(p){var i=p||0,j=0,t;while(j<=i){t=lookahead[j];if(!t){t=lookahead[j]=lex.token();}
+j+=1;}
+return t;}
+function advance(id,t){switch(token.id){case'(number)':if(nexttoken.id==='.'){warning("A dot following a number can be confused with a decimal point.",token);}
+break;case'-':if(nexttoken.id==='-'||nexttoken.id==='--'){warning("Confusing minusses.");}
+break;case'+':if(nexttoken.id==='+'||nexttoken.id==='++'){warning("Confusing plusses.");}
+break;}
+if(token.type==='(string)'||token.identifier){anonname=token.value;}
+if(id&&nexttoken.id!==id){if(t){if(nexttoken.id==='(end)'){warning("Unmatched '{a}'.",t,t.id);}else{warning("Expected '{a}' to match '{b}' from line {c} and instead saw '{d}'.",nexttoken,id,t.id,t.line,nexttoken.value);}}else if(nexttoken.type!=='(identifier)'||nexttoken.value!==id){warning("Expected '{a}' and instead saw '{b}'.",nexttoken,id,nexttoken.value);}}
+prevtoken=token;token=nexttoken;for(;;){nexttoken=lookahead.shift()||lex.token();if(nexttoken.id==='(end)'||nexttoken.id==='(error)'){return;}
+if(nexttoken.type==='special'){doOption();}else{if(nexttoken.id!=='(endline)'){break;}}}}
+function parse(rbp,initial){var left;if(nexttoken.id==='(end)'){error("Unexpected early end of program.",token);}
+advance();if(option.safe&&typeof predefined[token.value]==='boolean'&&(nexttoken.id!=='('&&nexttoken.id!=='.')){warning('ADsafe violation.',token);}
+if(initial){anonname='anonymous';funct['(verb)']=token.value;}
+if(initial===true&&token.fud){left=token.fud();}else{if(token.nud){left=token.nud();}else{if(nexttoken.type==='(number)'&&token.id==='.'){warning("A leading decimal point can be confused with a dot: '.{a}'.",token,nexttoken.value);advance();return token;}else{error("Expected an identifier and instead saw '{a}'.",token,token.id);}}
+while(rbp<nexttoken.lbp){advance();if(token.led){left=token.led(left);}else{error("Expected an operator and instead saw '{a}'.",token,token.id);}}}
+return left;}
+function adjacent(left,right){left=left||token;right=right||nexttoken;if(option.white||xmode==='styleproperty'||xmode==='style'){if(left.character!==right.from&&left.line===right.line){warning("Unexpected space after '{a}'.",right,left.value);}}}
+function nospace(left,right){left=left||token;right=right||nexttoken;if(option.white&&!left.comment){if(left.line===right.line){adjacent(left,right);}}}
+function nonadjacent(left,right){if(option.white){left=left||token;right=right||nexttoken;if(left.line===right.line&&left.character===right.from){warning("Missing space after '{a}'.",nexttoken,left.value);}}}
+function nobreaknonadjacent(left,right){left=left||token;right=right||nexttoken;if(!option.laxbreak&&left.line!==right.line){warning("Bad line breaking before '{a}'.",right,right.id);}else if(option.white){left=left||token;right=right||nexttoken;if(left.character===right.from){warning("Missing space after '{a}'.",nexttoken,left.value);}}}
+function indentation(bias){var i;if(option.white&&nexttoken.id!=='(end)'){i=indent+(bias||0);if(nexttoken.from!==i){warning("Expected '{a}' to have an indentation at {b} instead at {c}.",nexttoken,nexttoken.value,i,nexttoken.from);}}}
+function nolinebreak(t){t=t||token;if(t.line!==nexttoken.line){warning("Line breaking error '{a}'.",t,t.value);}}
+function comma(){if(token.line!==nexttoken.line){if(!option.laxbreak){warning("Bad line breaking before '{a}'.",token,nexttoken.id);}}else if(token.character!==nexttoken.from&&option.white){warning("Unexpected space after '{a}'.",nexttoken,token.value);}
+advance(',');nonadjacent(token,nexttoken);}
+function symbol(s,p){var x=syntax[s];if(!x||typeof x!=='object'){syntax[s]=x={id:s,lbp:p,value:s};}
+return x;}
+function delim(s){return symbol(s,0);}
+function stmt(s,f){var x=delim(s);x.identifier=x.reserved=true;x.fud=f;return x;}
+function blockstmt(s,f){var x=stmt(s,f);x.block=true;return x;}
+function reserveName(x){var c=x.id.charAt(0);if((c>='a'&&c<='z')||(c>='A'&&c<='Z')){x.identifier=x.reserved=true;}
+return x;}
+function prefix(s,f){var x=symbol(s,150);reserveName(x);x.nud=(typeof f==='function')?f:function(){this.right=parse(150);this.arity='unary';if(this.id==='++'||this.id==='--'){if(option.plusplus){warning("Unexpected use of '{a}'.",this,this.id);}else if((!this.right.identifier||this.right.reserved)&&this.right.id!=='.'&&this.right.id!=='['){warning("Bad operand.",this);}}
+return this;};return x;}
+function type(s,f){var x=delim(s);x.type=s;x.nud=f;return x;}
+function reserve(s,f){var x=type(s,f);x.identifier=x.reserved=true;return x;}
+function reservevar(s,v){return reserve(s,function(){if(this.id==='this'||this.id==='arguments'||this.id==='eval'){if(strict_mode&&funct['(global)']){warning("Strict violation.",this);}else if(option.safe){warning("ADsafe violation.",this);}}
+return this;});}
+function infix(s,f,p,w){var x=symbol(s,p);reserveName(x);x.led=function(left){if(!w){nobreaknonadjacent(prevtoken,token);nonadjacent(token,nexttoken);}
+if(typeof f==='function'){return f(left,this);}else{this.left=left;this.right=parse(p);return this;}};return x;}
+function relation(s,f){var x=symbol(s,100);x.led=function(left){nobreaknonadjacent(prevtoken,token);nonadjacent(token,nexttoken);var right=parse(100);if((left&&left.id==='NaN')||(right&&right.id==='NaN')){warning("Use the isNaN function to compare with NaN.",this);}else if(f){f.apply(this,[left,right]);}
+if(left.id==='!'){warning("Confusing use of '{a}'.",left,'!');}
+if(right.id==='!'){warning("Confusing use of '{a}'.",left,'!');}
+this.left=left;this.right=right;return this;};return x;}
+function isPoorRelation(node){return node&&((node.type==='(number)'&&+node.value===0)||(node.type==='(string)'&&node.value===' ')||node.type==='true'||node.type==='false'||node.type==='undefined'||node.type==='null');}
+function assignop(s,f){symbol(s,20).exps=true;return infix(s,function(left,that){var l;that.left=left;if(predefined[left.value]===false&&scope[left.value]['(global)']===true){warning('Read only.',left);}
+if(option.safe){l=left;do{if(typeof predefined[l.value]==='boolean'){warning('ADsafe violation.',l);}
+l=l.left;}while(l);}
+if(left){if(left.id==='.'||left.id==='['){if(!left.left||left.left.value==='arguments'){warning('Bad assignment.',that);}
+that.right=parse(19);return that;}else if(left.identifier&&!left.reserved){if(funct[left.value]==='exception'){warning("Do not assign to the exception parameter.",left);}
+that.right=parse(19);return that;}
+if(left===syntax['function']){warning("Expected an identifier in an assignment and instead saw a function invocation.",token);}}
+error("Bad assignment.",that);},20);}
+function bitwise(s,f,p){var x=symbol(s,p);reserveName(x);x.led=(typeof f==='function')?f:function(left){if(option.bitwise){warning("Unexpected use of '{a}'.",this,this.id);}
+this.left=left;this.right=parse(p);return this;};return x;}
+function bitwiseassignop(s){symbol(s,20).exps=true;return infix(s,function(left,that){if(option.bitwise){warning("Unexpected use of '{a}'.",that,that.id);}
+nonadjacent(prevtoken,token);nonadjacent(token,nexttoken);if(left){if(left.id==='.'||left.id==='['||(left.identifier&&!left.reserved)){parse(19);return that;}
+if(left===syntax['function']){warning("Expected an identifier in an assignment, and instead saw a function invocation.",token);}
+return that;}
+error("Bad assignment.",that);},20);}
+function suffix(s,f){var x=symbol(s,150);x.led=function(left){if(option.plusplus){warning("Unexpected use of '{a}'.",this,this.id);}else if((!left.identifier||left.reserved)&&left.id!=='.'&&left.id!=='['){warning("Bad operand.",this);}
+this.left=left;return this;};return x;}
+function optionalidentifier(){if(nexttoken.identifier){advance();if(option.safe&&banned[token.value]){warning("ADsafe violation: '{a}'.",token,token.value);}else if(token.reserved&&!option.es5){warning("Expected an identifier and instead saw '{a}' (a reserved word).",token,token.id);}
+return token.value;}}
+function identifier(){var i=optionalidentifier();if(i){return i;}
+if(token.id==='function'&&nexttoken.id==='('){warning("Missing name in function statement.");}else{error("Expected an identifier and instead saw '{a}'.",nexttoken,nexttoken.value);}}
+function reachable(s){var i=0,t;if(nexttoken.id!==';'||noreach){return;}
+for(;;){t=peek(i);if(t.reach){return;}
+if(t.id!=='(endline)'){if(t.id==='function'){warning("Inner functions should be listed at the top of the outer function.",t);break;}
+warning("Unreachable '{a}' after '{b}'.",t,t.value,s);break;}
+i+=1;}}
+function statement(noindent){var i=indent,r,s=scope,t=nexttoken;if(t.id===';'){warning("Unnecessary semicolon.",t);advance(';');return;}
+if(t.identifier&&!t.reserved&&peek().id===':'){advance();advance(':');scope=Object.create(s);addlabel(t.value,'label');if(!nexttoken.labelled){warning("Label '{a}' on {b} statement.",nexttoken,t.value,nexttoken.value);}
+if(jx.test(t.value+':')){warning("Label '{a}' looks like a javascript url.",t,t.value);}
+nexttoken.label=t.value;t=nexttoken;}
+if(!noindent){indentation();}
+r=parse(0,true);if(!t.block){if(!r||!r.exps){warning("Expected an assignment or function call and instead saw an expression.",token);}else if(r.id==='('&&r.left.id==='new'){warning("Do not use 'new' for side effects.");}
+if(nexttoken.id!==';'){warningAt("Missing semicolon.",token.line,token.from+token.value.length);}else{adjacent(token,nexttoken);advance(';');nonadjacent(token,nexttoken);}}
+indent=i;scope=s;return r;}
+function use_strict(){if(nexttoken.value==='use strict'){advance();advance(';');strict_mode=true;return true;}else{return false;}}
+function statements(begin){var a=[],f,p;if(begin&&!use_strict()&&option.strict){warning('Missing "use strict" statement.',nexttoken);}
+if(option.adsafe){switch(begin){case'script':if(!adsafe_may){if(nexttoken.value!=='ADSAFE'||peek(0).id!=='.'||(peek(1).value!=='id'&&peek(1).value!=='go')){error('ADsafe violation: Missing ADSAFE.id or ADSAFE.go.',nexttoken);}}
+if(nexttoken.value==='ADSAFE'&&peek(0).id==='.'&&peek(1).value==='id'){if(adsafe_may){error('ADsafe violation.',nexttoken);}
+advance('ADSAFE');advance('.');advance('id');advance('(');if(nexttoken.value!==adsafe_id){error('ADsafe violation: id does not match.',nexttoken);}
+advance('(string)');advance(')');advance(';');adsafe_may=true;}
+break;case'lib':if(nexttoken.value==='ADSAFE'){advance('ADSAFE');advance('.');advance('lib');advance('(');advance('(string)');comma();f=parse(0);if(f.id!=='function'){error('The second argument to lib must be a function.',f);}
+p=f.funct['(params)'];p=p&&p.join(', ');if(p&&p!=='lib'){error("Expected '{a}' and instead saw '{b}'.",f,'(lib)','('+p+')');}
+advance(')');advance(';');return a;}else{error("ADsafe lib violation.");}}}
+while(!nexttoken.reach&&nexttoken.id!=='(end)'){if(nexttoken.id===';'){warning("Unnecessary semicolon.");advance(';');}else{a.push(statement());}}
+return a;}
+function block(f){var a,b=inblock,old_indent=indent,s=scope,t;inblock=f;scope=Object.create(scope);nonadjacent(token,nexttoken);t=nexttoken;if(nexttoken.id==='{'){advance('{');if(nexttoken.id!=='}'||token.line!==nexttoken.line){indent+=option.indent;while(!f&&nexttoken.from>indent){indent+=option.indent;}
+if(!f){use_strict();}
+a=statements();indent-=option.indent;indentation();}
+advance('}',t);indent=old_indent;}else{warning("Expected '{a}' and instead saw '{b}'.",nexttoken,'{',nexttoken.value);noreach=true;a=[statement()];noreach=false;}
+funct['(verb)']=null;scope=s;inblock=b;return a;}
+function idValue(){return this;}
+function countMember(m){if(membersOnly&&typeof membersOnly[m]!=='boolean'){warning("Unexpected /*member '{a}'.",token,m);}
+if(typeof member[m]==='number'){member[m]+=1;}else{member[m]=1;}}
+function note_implied(token){var name=token.value,line=token.line,a=implied[name];if(typeof a==='function'){a=false;}
+if(!a){a=[line];implied[name]=a;}else if(a[a.length-1]!==line){a.push(line);}}
+function cssName(){if(nexttoken.identifier){advance();return true;}}
+function cssNumber(){if(nexttoken.id==='-'){advance('-');adjacent();nolinebreak();}
+if(nexttoken.type==='(number)'){advance('(number)');return true;}}
+function cssString(){if(nexttoken.type==='(string)'){advance();return true;}}
+function cssColor(){var i,number,value;if(nexttoken.identifier){value=nexttoken.value;if(value==='rgb'||value==='rgba'){advance();advance('(');for(i=0;i<3;i+=1){if(i){advance(',');}
+number=nexttoken.value;if(nexttoken.type!=='(number)'||number<0){warning("Expected a positive number and instead saw '{a}'",nexttoken,number);advance();}else{advance();if(nexttoken.id==='%'){advance('%');if(number>100){warning("Expected a percentage and instead saw '{a}'",token,number);}}else{if(number>255){warning("Expected a small number and instead saw '{a}'",token,number);}}}}
+if(value==='rgba'){advance(',');number=+nexttoken.value;if(nexttoken.type!=='(number)'||number<0||number>1){warning("Expected a number between 0 and 1 and instead saw '{a}'",nexttoken,number);}
+advance();if(nexttoken.id==='%'){warning("Unexpected '%'.");advance('%');}}
+advance(')');return true;}else if(cssColorData[nexttoken.value]===true){advance();return true;}}else if(nexttoken.type==='(color)'){advance();return true;}
+return false;}
+function cssLength(){if(nexttoken.id==='-'){advance('-');adjacent();nolinebreak();}
+if(nexttoken.type==='(number)'){advance();if(nexttoken.type!=='(string)'&&cssLengthData[nexttoken.value]===true){adjacent();advance();}else if(+token.value!==0){warning("Expected a linear unit and instead saw '{a}'.",nexttoken,nexttoken.value);}
+return true;}
+return false;}
+function cssLineHeight(){if(nexttoken.id==='-'){advance('-');adjacent();}
+if(nexttoken.type==='(number)'){advance();if(nexttoken.type!=='(string)'&&cssLengthData[nexttoken.value]===true){adjacent();advance();}
+return true;}
+return false;}
+function cssWidth(){if(nexttoken.identifier){switch(nexttoken.value){case'thin':case'medium':case'thick':advance();return true;}}else{return cssLength();}}
+function cssMargin(){if(nexttoken.identifier){if(nexttoken.value==='auto'){advance();return true;}}else{return cssLength();}}
+function cssAttr(){if(nexttoken.identifier&&nexttoken.value==='attr'){advance();advance('(');if(!nexttoken.identifier){warning("Expected a name and instead saw '{a}'.",nexttoken,nexttoken.value);}
+advance();advance(')');return true;}
+return false;}
+function cssCommaList(){while(nexttoken.id!==';'){if(!cssName()&&!cssString()){warning("Expected a name and instead saw '{a}'.",nexttoken,nexttoken.value);}
+if(nexttoken.id!==','){return true;}
+comma();}}
+function cssCounter(){if(nexttoken.identifier&&nexttoken.value==='counter'){advance();advance('(');if(!nexttoken.identifier){}
+advance();if(nexttoken.id===','){comma();if(nexttoken.type!=='(string)'){warning("Expected a string and instead saw '{a}'.",nexttoken,nexttoken.value);}
+advance();}
+advance(')');return true;}
+if(nexttoken.identifier&&nexttoken.value==='counters'){advance();advance('(');if(!nexttoken.identifier){warning("Expected a name and instead saw '{a}'.",nexttoken,nexttoken.value);}
+advance();if(nexttoken.id===','){comma();if(nexttoken.type!=='(string)'){warning("Expected a string and instead saw '{a}'.",nexttoken,nexttoken.value);}
+advance();}
+if(nexttoken.id===','){comma();if(nexttoken.type!=='(string)'){warning("Expected a string and instead saw '{a}'.",nexttoken,nexttoken.value);}
+advance();}
+advance(')');return true;}
+return false;}
+function cssShape(){var i;if(nexttoken.identifier&&nexttoken.value==='rect'){advance();advance('(');for(i=0;i<4;i+=1){if(!cssLength()){warning("Expected a number and instead saw '{a}'.",nexttoken,nexttoken.value);break;}}
+advance(')');return true;}
+return false;}
+function cssUrl(){var c,url;if(nexttoken.identifier&&nexttoken.value==='url'){nexttoken=lex.range('(',')');url=nexttoken.value;c=url.charAt(0);if(c==='"'||c==='\''){if(url.slice(-1)!==c){warning("Bad url string.");}else{url=url.slice(1,-1);if(url.indexOf(c)>=0){warning("Bad url string.");}}}
+if(!url){warning("Missing url.");}
+advance();if(option.safe&&ux.test(url)){error("ADsafe URL violation.");}
+urls.push(url);return true;}
+return false;}
+cssAny=[cssUrl,function(){for(;;){if(nexttoken.identifier){switch(nexttoken.value.toLowerCase()){case'url':cssUrl();break;case'expression':warning("Unexpected expression '{a}'.",nexttoken,nexttoken.value);advance();break;default:advance();}}else{if(nexttoken.id===';'||nexttoken.id==='!'||nexttoken.id==='(end)'||nexttoken.id==='}'){return true;}
+advance();}}}];cssBorderStyle=['none','hidden','dotted','dashed','solid','double','ridge','inset','outset'];cssBreak=['auto','always','avoid','left','right'];cssOverflow=['auto','hidden','scroll','visible'];cssAttributeData={background:[true,'background-attachment','background-color','background-image','background-position','background-repeat'],'background-attachment':['scroll','fixed'],'background-color':['transparent',cssColor],'background-image':['none',cssUrl],'background-position':[2,[cssLength,'top','bottom','left','right','center']],'background-repeat':['repeat','repeat-x','repeat-y','no-repeat'],'border':[true,'border-color','border-style','border-width'],'border-bottom':[true,'border-bottom-color','border-bottom-style','border-bottom-width'],'border-bottom-color':cssColor,'border-bottom-style':cssBorderStyle,'border-bottom-width':cssWidth,'border-collapse':['collapse','separate'],'border-color':['transparent',4,cssColor],'border-left':[true,'border-left-color','border-left-style','border-left-width'],'border-left-color':cssColor,'border-left-style':cssBorderStyle,'border-left-width':cssWidth,'border-right':[true,'border-right-color','border-right-style','border-right-width'],'border-right-color':cssColor,'border-right-style':cssBorderStyle,'border-right-width':cssWidth,'border-spacing':[2,cssLength],'border-style':[4,cssBorderStyle],'border-top':[true,'border-top-color','border-top-style','border-top-width'],'border-top-color':cssColor,'border-top-style':cssBorderStyle,'border-top-width':cssWidth,'border-width':[4,cssWidth],bottom:[cssLength,'auto'],'caption-side':['bottom','left','right','top'],clear:['both','left','none','right'],clip:[cssShape,'auto'],color:cssColor,content:['open-quote','close-quote','no-open-quote','no-close-quote',cssString,cssUrl,cssCounter,cssAttr],'counter-increment':[cssName,'none'],'counter-reset':[cssName,'none'],cursor:[cssUrl,'auto','crosshair','default','e-resize','help','move','n-resize','ne-resize','nw-resize','pointer','s
-resize','se-resize','sw-resize','w-resize','text','wait'],direction:['ltr','rtl'],display:['block','compact','inline','inline-block','inline-table','list-item','marker','none','run-in','table','table-caption','table-cell','table-column','table-column-group','table-footer-group','table-header-group','table-row','table-row-group'],'empty-cells':['show','hide'],'float':['left','none','right'],font:['caption','icon','menu','message-box','small-caption','status-bar',true,'font-size','font-style','font-weight','font-family'],'font-family':cssCommaList,'font-size':['xx-small','x-small','small','medium','large','x-large','xx-large','larger','smaller',cssLength],'font-size-adjust':['none',cssNumber],'font-stretch':['normal','wider','narrower','ultra-condensed','extra-condensed','condensed','semi-condensed','semi-expanded','expanded','extra-expanded'],'font-style':['normal','italic','oblique'],'font-variant':['normal','small-caps'],'font-weight':['normal','bold','bolder','lighter',cssNumber],height:[cssLength,'auto'],left:[cssLength,'auto'],'letter-spacing':['normal',cssLength],'line-height':['normal',cssLineHeight],'list-style':[true,'list-style-image','list-style-position','list-style-type'],'list-style-image':['none',cssUrl],'list-style-position':['inside','outside'],'list-style-type':['circle','disc','square','decimal','decimal-leading-zero','lower-roman','upper-roman','lower-greek','lower-alpha','lower-latin','upper-alpha','upper-latin','hebrew','katakana','hiragana-iroha','katakana-oroha','none'],margin:[4,cssMargin],'margin-bottom':cssMargin,'margin-left':cssMargin,'margin-right':cssMargin,'margin-top':cssMargin,'marker-offset':[cssLength,'auto'],'max-height':[cssLength,'none'],'max-width':[cssLength,'none'],'min-height':cssLength,'min-width':cssLength,opacity:cssNumber,outline:[true,'outline-color','outline-style','outline-width'],'outline-color':['invert',cssColor],'outline-style':['dashed','dotted','double','groove','inset','none','outset','ridge','solid'],'outline
-width':cssWidth,overflow:cssOverflow,'overflow-x':cssOverflow,'overflow-y':cssOverflow,padding:[4,cssLength],'padding-bottom':cssLength,'padding-left':cssLength,'padding-right':cssLength,'padding-top':cssLength,'page-break-after':cssBreak,'page-break-before':cssBreak,position:['absolute','fixed','relative','static'],quotes:[8,cssString],right:[cssLength,'auto'],'table-layout':['auto','fixed'],'text-align':['center','justify','left','right'],'text-decoration':['none','underline','overline','line-through','blink'],'text-indent':cssLength,'text-shadow':['none',4,[cssColor,cssLength]],'text-transform':['capitalize','uppercase','lowercase','none'],top:[cssLength,'auto'],'unicode-bidi':['normal','embed','bidi-override'],'vertical-align':['baseline','bottom','sub','super','top','text-top','middle','text-bottom',cssLength],visibility:['visible','hidden','collapse'],'white-space':['normal','nowrap','pre','pre-line','pre-wrap','inherit'],width:[cssLength,'auto'],'word-spacing':['normal',cssLength],'word-wrap':['break-word','normal'],'z-index':['auto',cssNumber]};function styleAttribute(){var v;while(nexttoken.id==='*'||nexttoken.id==='#'||nexttoken.value==='_'){if(!option.css){warning("Unexpected '{a}'.",nexttoken,nexttoken.value);}
+advance();}
+if(nexttoken.id==='-'){if(!option.css){warning("Unexpected '{a}'.",nexttoken,nexttoken.value);}
+advance('-');if(!nexttoken.identifier){warning("Expected a non-standard style attribute and instead saw '{a}'.",nexttoken,nexttoken.value);}
+advance();return cssAny;}else{if(!nexttoken.identifier){warning("Excepted a style attribute, and instead saw '{a}'.",nexttoken,nexttoken.value);}else{if(is_own(cssAttributeData,nexttoken.value)){v=cssAttributeData[nexttoken.value];}else{v=cssAny;if(!option.css){warning("Unrecognized style attribute '{a}'.",nexttoken,nexttoken.value);}}}
+advance();return v;}}
+function styleValue(v){var i=0,n,once,match,round,start=0,vi;switch(typeof v){case'function':return v();case'string':if(nexttoken.identifier&&nexttoken.value===v){advance();return true;}
+return false;}
+for(;;){if(i>=v.length){return false;}
+vi=v[i];i+=1;if(vi===true){break;}else if(typeof vi==='number'){n=vi;vi=v[i];i+=1;}else{n=1;}
+match=false;while(n>0){if(styleValue(vi)){match=true;n-=1;}else{break;}}
+if(match){return true;}}
+start=i;once=[];for(;;){round=false;for(i=start;i<v.length;i+=1){if(!once[i]){if(styleValue(cssAttributeData[v[i]])){match=true;round=true;once[i]=true;break;}}}
+if(!round){return match;}}}
+function styleChild(){if(nexttoken.id==='(number)'){advance();if(nexttoken.value==='n'&&nexttoken.identifier){adjacent();advance();if(nexttoken.id==='+'){adjacent();advance('+');adjacent();advance('(number)');}}
+return;}else{switch(nexttoken.value){case'odd':case'even':if(nexttoken.identifier){advance();return;}}}
+warning("Unexpected token '{a}'.",nexttoken,nexttoken.value);}
+function substyle(){var v;for(;;){if(nexttoken.id==='}'||nexttoken.id==='(end)'||xquote&&nexttoken.id===xquote){return;}
+while(nexttoken.id===';'){warning("Misplaced ';'.");advance(';');}
+v=styleAttribute();advance(':');if(nexttoken.identifier&&nexttoken.value==='inherit'){advance();}else{if(!styleValue(v)){warning("Unexpected token '{a}'.",nexttoken,nexttoken.value);advance();}}
+if(nexttoken.id==='!'){advance('!');adjacent();if(nexttoken.identifier&&nexttoken.value==='important'){advance();}else{warning("Expected '{a}' and instead saw '{b}'.",nexttoken,'important',nexttoken.value);}}
+if(nexttoken.id==='}'||nexttoken.id===xquote){warning("Missing '{a}'.",nexttoken,';');}else{advance(';');}}}
+function styleSelector(){if(nexttoken.identifier){if(!is_own(htmltag,nexttoken.value)){warning("Expected a tagName, and instead saw {a}.",nexttoken,nexttoken.value);}
+advance();}else{switch(nexttoken.id){case'>':case'+':advance();styleSelector();break;case':':advance(':');switch(nexttoken.value){case'active':case'after':case'before':case'checked':case'disabled':case'empty':case'enabled':case'first-child':case'first-letter':case'first-line':case'first-of-type':case'focus':case'hover':case'last-of-type':case'link':case'only-of-type':case'root':case'target':case'visited':advance();break;case'lang':advance();advance('(');if(!nexttoken.identifier){warning("Expected a lang code, and instead saw :{a}.",nexttoken,nexttoken.value);}
+advance(')');break;case'nth-child':case'nth-last-child':case'nth-last-of-type':case'nth-of-type':advance();advance('(');styleChild();advance(')');break;case'not':advance();advance('(');if(nexttoken.id===':'&&peek(0).value==='not'){warning("Nested not.");}
+styleSelector();advance(')');break;default:warning("Expected a pseudo, and instead saw :{a}.",nexttoken,nexttoken.value);}
+break;case'#':advance('#');if(!nexttoken.identifier){warning("Expected an id, and instead saw #{a}.",nexttoken,nexttoken.value);}
+advance();break;case'*':advance('*');break;case'.':advance('.');if(!nexttoken.identifier){warning("Expected a class, and instead saw #.{a}.",nexttoken,nexttoken.value);}
+advance();break;case'[':advance('[');if(!nexttoken.identifier){warning("Expected an attribute, and instead saw [{a}].",nexttoken,nexttoken.value);}
+advance();if(nexttoken.id==='='||nexttoken.value==='~='||nexttoken.value==='$='||nexttoken.value==='|='||nexttoken.id==='*='||nexttoken.id==='^='){advance();if(nexttoken.type!=='(string)'){warning("Expected a string, and instead saw {a}.",nexttoken,nexttoken.value);}
+advance();}
+advance(']');break;default:error("Expected a CSS selector, and instead saw {a}.",nexttoken,nexttoken.value);}}}
+function stylePattern(){var name;if(nexttoken.id==='{'){warning("Expected a style pattern, and instead saw '{a}'.",nexttoken,nexttoken.id);}else if(nexttoken.id==='@'){advance('@');name=nexttoken.value;if(nexttoken.identifier&&atrule[name]===true){advance();return name;}
+warning("Expected an at-rule, and instead saw @{a}.",nexttoken,name);}
+for(;;){styleSelector();if(nexttoken.id==='</'||nexttoken.id==='{'||nexttoken.id==='(end)'){return'';}
+if(nexttoken.id===','){comma();}}}
+function styles(){var i;while(nexttoken.id==='@'){i=peek();if(i.identifier&&i.value==='import'){advance('@');advance();if(!cssUrl()){warning("Expected '{a}' and instead saw '{b}'.",nexttoken,'url',nexttoken.value);advance();}
+advance(';');}else{break;}}
+while(nexttoken.id!=='</'&&nexttoken.id!=='(end)'){stylePattern();xmode='styleproperty';if(nexttoken.id===';'){advance(';');}else{advance('{');substyle();xmode='style';advance('}');}}}
+function doBegin(n){if(n!=='html'&&!option.fragment){if(n==='div'&&option.adsafe){error("ADSAFE: Use the fragment option.");}else{error("Expected '{a}' and instead saw '{b}'.",token,'html',n);}}
+if(option.adsafe){if(n==='html'){error("Currently, ADsafe does not operate on whole HTML documents. It operates on <div> fragments and .js files.",token);}
+if(option.fragment){if(n!=='div'){error("ADsafe violation: Wrap the widget in a div.",token);}}else{error("Use the fragment option.",token);}}
+option.browser=true;assume();}
+function doAttribute(n,a,v){var u,x;if(a==='id'){u=typeof v==='string'?v.toUpperCase():'';if(ids[u]===true){warning("Duplicate id='{a}'.",nexttoken,v);}
+if(!/^[A-Za-z][A-Za-z0-9._:\-]*$/.test(v)){warning("Bad id: '{a}'.",nexttoken,v);}else if(option.adsafe){if(adsafe_id){if(v.slice(0,adsafe_id.length)!==adsafe_id){warning("ADsafe violation: An id must have a '{a}' prefix",nexttoken,adsafe_id);}else if(!/^[A-Z]+_[A-Z]+$/.test(v)){warning("ADSAFE violation: bad id.");}}else{adsafe_id=v;if(!/^[A-Z]+_$/.test(v)){warning("ADSAFE violation: bad id.");}}}
+x=v.search(dx);if(x>=0){warning("Unexpected character '{a}' in {b}.",token,v.charAt(x),a);}
+ids[u]=true;}else if(a==='class'||a==='type'||a==='name'){x=v.search(qx);if(x>=0){warning("Unexpected character '{a}' in {b}.",token,v.charAt(x),a);}
+ids[u]=true;}else if(a==='href'||a==='background'||a==='content'||a==='data'||a.indexOf('src')>=0||a.indexOf('url')>=0){if(option.safe&&ux.test(v)){error("ADsafe URL violation.");}
+urls.push(v);}else if(a==='for'){if(option.adsafe){if(adsafe_id){if(v.slice(0,adsafe_id.length)!==adsafe_id){warning("ADsafe violation: An id must have a '{a}' prefix",nexttoken,adsafe_id);}else if(!/^[A-Z]+_[A-Z]+$/.test(v)){warning("ADSAFE violation: bad id.");}}else{warning("ADSAFE violation: bad id.");}}}else if(a==='name'){if(option.adsafe&&v.indexOf('_')>=0){warning("ADsafe name violation.");}}}
+function doTag(n,a){var i,t=htmltag[n],x;src=false;if(!t){error("Unrecognized tag '<{a}>'.",nexttoken,n===n.toLowerCase()?n:n+' (capitalization error)');}
+if(stack.length>0){if(n==='html'){error("Too many <html> tags.",token);}
+x=t.parent;if(x){if(x.indexOf(' '+stack[stack.length-1].name+' ')<0){error("A '<{a}>' must be within '<{b}>'.",token,n,x);}}else if(!option.adsafe&&!option.fragment){i=stack.length;do{if(i<=0){error("A '<{a}>' must be within '<{b}>'.",token,n,'body');}
+i-=1;}while(stack[i].name!=='body');}}
+switch(n){case'div':if(option.adsafe&&stack.length===1&&!adsafe_id){warning("ADSAFE violation: missing ID_.");}
+break;case'script':xmode='script';advance('>');indent=nexttoken.from;if(a.lang){warning("lang is deprecated.",token);}
+if(option.adsafe&&stack.length!==1){warning("ADsafe script placement violation.",token);}
+if(a.src){if(option.adsafe&&(!adsafe_may||!approved[a.src])){warning("ADsafe unapproved script source.",token);}
+if(a.type){warning("type is unnecessary.",token);}}else{if(adsafe_went){error("ADsafe script violation.",token);}
+statements('script');}
+xmode='html';advance('</');if(!nexttoken.identifier&&nexttoken.value!=='script'){warning("Expected '{a}' and instead saw '{b}'.",nexttoken,'script',nexttoken.value);}
+advance();xmode='outer';break;case'style':xmode='style';advance('>');styles();xmode='html';advance('</');if(!nexttoken.identifier&&nexttoken.value!=='style'){warning("Expected '{a}' and instead saw '{b}'.",nexttoken,'style',nexttoken.value);}
+advance();xmode='outer';break;case'input':switch(a.type){case'radio':case'checkbox':case'button':case'reset':case'submit':break;case'text':case'file':case'password':case'file':case'hidden':case'image':if(option.adsafe&&a.autocomplete!=='off'){warning("ADsafe autocomplete violation.");}
+break;default:warning("Bad input type.");}
+break;case'applet':case'body':case'embed':case'frame':case'frameset':case'head':case'iframe':case'noembed':case'noframes':case'object':case'param':if(option.adsafe){warning("ADsafe violation: Disallowed tag: "+n);}
+break;}}
+function closetag(n){return'</'+n+'>';}
+function html(){var a,attributes,e,n,q,t,v,w=option.white,wmode;xmode='html';xquote='';stack=null;for(;;){switch(nexttoken.value){case'<':xmode='html';advance('<');attributes={};t=nexttoken;if(!t.identifier){warning("Bad identifier {a}.",t,t.value);}
+n=t.value;if(option.cap){n=n.toLowerCase();}
+t.name=n;advance();if(!stack){stack=[];doBegin(n);}
+v=htmltag[n];if(typeof v!=='object'){error("Unrecognized tag '<{a}>'.",t,n);}
+e=v.empty;t.type=n;for(;;){if(nexttoken.id==='/'){advance('/');if(nexttoken.id!=='>'){warning("Expected '{a}' and instead saw '{b}'.",nexttoken,'>',nexttoken.value);}
+break;}
+if(nexttoken.id&&nexttoken.id.substr(0,1)==='>'){break;}
+if(!nexttoken.identifier){if(nexttoken.id==='(end)'||nexttoken.id==='(error)'){error("Missing '>'.",nexttoken);}
+warning("Bad identifier.");}
+option.white=true;nonadjacent(token,nexttoken);a=nexttoken.value;option.white=w;advance();if(!option.cap&&a!==a.toLowerCase()){warning("Attribute '{a}' not all lower case.",nexttoken,a);}
+a=a.toLowerCase();xquote='';if(is_own(attributes,a)){warning("Attribute '{a}' repeated.",nexttoken,a);}
+if(a.slice(0,2)==='on'){if(!option.on){warning("Avoid HTML event handlers.");}
+xmode='scriptstring';advance('=');q=nexttoken.id;if(q!=='"'&&q!=="'"){error("Missing quote.");}
+xquote=q;wmode=option.white;option.white=false;advance(q);statements('on');option.white=wmode;if(nexttoken.id!==q){error("Missing close quote on script attribute.");}
+xmode='html';xquote='';advance(q);v=false;}else if(a==='style'){xmode='scriptstring';advance('=');q=nexttoken.id;if(q!=='"'&&q!=="'"){error("Missing quote.");}
+xmode='styleproperty';xquote=q;advance(q);substyle();xmode='html';xquote='';advance(q);v=false;}else{if(nexttoken.id==='='){advance('=');v=nexttoken.value;if(!nexttoken.identifier&&nexttoken.id!=='"'&&nexttoken.id!=='\''&&nexttoken.type!=='(string)'&&nexttoken.type!=='(number)'&&nexttoken.type!=='(color)'){warning("Expected an attribute value and instead saw '{a}'.",token,a);}
+advance();}else{v=true;}}
+attributes[a]=v;doAttribute(n,a,v);}
+doTag(n,attributes);if(!e){stack.push(t);}
+xmode='outer';advance('>');break;case'</':xmode='html';advance('</');if(!nexttoken.identifier){warning("Bad identifier.");}
+n=nexttoken.value;if(option.cap){n=n.toLowerCase();}
+advance();if(!stack){error("Unexpected '{a}'.",nexttoken,closetag(n));}
+t=stack.pop();if(!t){error("Unexpected '{a}'.",nexttoken,closetag(n));}
+if(t.name!==n){error("Expected '{a}' and instead saw '{b}'.",nexttoken,closetag(t.name),closetag(n));}
+if(nexttoken.id!=='>'){error("Missing '{a}'.",nexttoken,'>');}
+xmode='outer';advance('>');break;case'<!':if(option.safe){warning("ADsafe HTML violation.");}
+xmode='html';for(;;){advance();if(nexttoken.id==='>'||nexttoken.id==='(end)'){break;}
+if(nexttoken.value.indexOf('--')>=0){warning("Unexpected --.");}
+if(nexttoken.value.indexOf('<')>=0){warning("Unexpected <.");}
+if(nexttoken.value.indexOf('>')>=0){warning("Unexpected >.");}}
+xmode='outer';advance('>');break;case'(end)':return;default:if(nexttoken.id==='(end)'){error("Missing '{a}'.",nexttoken,'</'+stack[stack.length-1].value+'>');}else{advance();}}
+if(stack&&stack.length===0&&(option.adsafe||!option.fragment||nexttoken.id==='(end)')){break;}}
+if(nexttoken.id!=='(end)'){error("Unexpected material after the end.");}}
+type('(number)',idValue);type('(string)',idValue);syntax['(identifier)']={type:'(identifier)',lbp:0,identifier:true,nud:function(){var v=this.value,s=scope[v],f;if(typeof s==='function'){s=undefined;}else if(typeof s==='boolean'){f=funct;funct=functions[0];addlabel(v,'var');s=funct;funct=f;}
+if(funct===s){switch(funct[v]){case'unused':funct[v]='var';break;case'label':warning("'{a}' is a statement label.",token,v);break;}}else if(funct['(global)']){if(option.undef&&predefined[v]!=='boolean'){warning("'{a}' is not defined.",token,v);}
+note_implied(token);}else{switch(funct[v]){case'closure':case'function':case'var':case'unused':warning("'{a}' used out of scope.",token,v);break;case'label':warning("'{a}' is a statement label.",token,v);break;case'outer':case'global':break;default:if(s===true){funct[v]=true;}else if(s===null){warning("'{a}' is not allowed.",token,v);note_implied(token);}else if(typeof s!=='object'){if(option.undef){warning("'{a}' is not defined.",token,v);}else{funct[v]=true;}
+note_implied(token);}else{switch(s[v]){case'function':case'var':case'unused':s[v]='closure';funct[v]=s['(global)']?'global':'outer';break;case'closure':case'parameter':funct[v]=s['(global)']?'global':'outer';break;case'label':warning("'{a}' is a statement label.",token,v);}}}}
+return this;},led:function(){error("Expected an operator and instead saw '{a}'.",nexttoken,nexttoken.value);}};type('(regexp)',function(){return this;});delim('(endline)');delim('(begin)');delim('(end)').reach=true;delim('</').reach=true;delim('<!');delim('<!--');delim('-->');delim('(error)').reach=true;delim('}').reach=true;delim(')');delim(']');delim('"').reach=true;delim("'").reach=true;delim(';');delim(':').reach=true;delim(',');delim('#');delim('@');reserve('else');reserve('case').reach=true;reserve('catch');reserve('default').reach=true;reserve('finally');reservevar('arguments');reservevar('eval');reservevar('false');reservevar('Infinity');reservevar('NaN');reservevar('null');reservevar('this');reservevar('true');reservevar('undefined');assignop('=','assign',20);assignop('+=','assignadd',20);assignop('-=','assignsub',20);assignop('*=','assignmult',20);assignop('/=','assigndiv',20).nud=function(){error("A regular expression literal can be confused with '/='.");};assignop('%=','assignmod',20);bitwiseassignop('&=','assignbitand',20);bitwiseassignop('|=','assignbitor',20);bitwiseassignop('^=','assignbitxor',20);bitwiseassignop('<<=','assignshiftleft',20);bitwiseassignop('>>=','assignshiftright',20);bitwiseassignop('>>>=','assignshiftrightunsigned',20);infix('?',function(left,that){that.left=left;that.right=parse(10);advance(':');that['else']=parse(10);return that;},30);infix('||','or',40);infix('&&','and',50);bitwise('|','bitor',70);bitwise('^','bitxor',80);bitwise('&','bitand',90);relation('==',function(left,right){if(option.eqeqeq){warning("Expected '{a}' and instead saw '{b}'.",this,'===','==');}else if(isPoorRelation(left)){warning("Use '{a}' to compare with '{b}'.",this,'===',left.value);}else if(isPoorRelation(right)){warning("Use '{a}' to compare with '{b}'.",this,'===',right.value);}
+return this;});relation('===');relation('!=',function(left,right){if(option.eqeqeq){warning("Expected '{a}' and instead saw '{b}'.",this,'!==','!=');}else if(isPoorRelation(left)){warning("Use '{a}' to compare with '{b}'.",this,'!==',left.value);}else if(isPoorRelation(right)){warning("Use '{a}' to compare with '{b}'.",this,'!==',right.value);}
+return this;});relation('!==');relation('<');relation('>');relation('<=');relation('>=');bitwise('<<','shiftleft',120);bitwise('>>','shiftright',120);bitwise('>>>','shiftrightunsigned',120);infix('in','in',120);infix('instanceof','instanceof',120);infix('+',function(left,that){var right=parse(130);if(left&&right&&left.id==='(string)'&&right.id==='(string)'){left.value+=right.value;left.character=right.character;if(jx.test(left.value)){warning("JavaScript URL.",left);}
+return left;}
+that.left=left;that.right=right;return that;},130);prefix('+','num');infix('-','sub',130);prefix('-','neg');infix('*','mult',140);infix('/','div',140);infix('%','mod',140);suffix('++','postinc');prefix('++','preinc');syntax['++'].exps=true;suffix('--','postdec');prefix('--','predec');syntax['--'].exps=true;prefix('delete',function(){var p=parse(0);if(!p||(p.id!=='.'&&p.id!=='[')){warning("Expected '{a}' and instead saw '{b}'.",nexttoken,'.',nexttoken.value);}
+this.first=p;return this;}).exps=true;prefix('~',function(){if(option.bitwise){warning("Unexpected '{a}'.",this,'~');}
+parse(150);return this;});prefix('!',function(){this.right=parse(150);this.arity='unary';if(bang[this.right.id]===true){warning("Confusing use of '{a}'.",this,'!');}
+return this;});prefix('typeof','typeof');prefix('new',function(){var c=parse(155),i;if(c&&c.id!=='function'){if(c.identifier){c['new']=true;switch(c.value){case'Object':warning("Use the object literal notation {}.",token);break;case'Array':if(nexttoken.id!=='('){warning("Use the array literal notation [].",token);}else{advance('(');if(nexttoken.id===')'){warning("Use the array literal notation [].",token);}else{i=parse(0);c.dimension=i;if((i.id==='(number)'&&/[.+\-Ee]/.test(i.value))||(i.id==='-'&&!i.right)||i.id==='(string)'||i.id==='['||i.id==='{'||i.id==='true'||i.id==='false'||i.id==='null'||i.id==='undefined'||i.id==='Infinity'){warning("Use the array literal notation [].",token);}
+if(nexttoken.id!==')'){error("Use the array literal notation [].",token);}}
+advance(')');}
+this.first=c;return this;case'Number':case'String':case'Boolean':case'Math':case'JSON':warning("Do not use {a} as a constructor.",token,c.value);break;case'Function':if(!option.evil){warning("The Function constructor is eval.");}
+break;case'Date':case'RegExp':break;default:if(c.id!=='function'){i=c.value.substr(0,1);if(option.newcap&&(i<'A'||i>'Z')){warning("A constructor name should start with an uppercase letter.",token);}}}}else{if(c.id!=='.'&&c.id!=='['&&c.id!=='('){warning("Bad constructor.",token);}}}else{warning("Weird construction. Delete 'new'.",this);}
+adjacent(token,nexttoken);if(nexttoken.id!=='('){warning("Missing '()' invoking a constructor.");}
+this.first=c;return this;});syntax['new'].exps=true;infix('.',function(left,that){adjacent(prevtoken,token);var m=identifier();if(typeof m==='string'){countMember(m);}
+that.left=left;that.right=m;if(!option.evil&&left&&left.value==='document'&&(m==='write'||m==='writeln')){warning("document.write can be a form of eval.",left);}else if(option.adsafe){if(left&&left.value==='ADSAFE'){if(m==='id'||m==='lib'){warning("ADsafe violation.",that);}else if(m==='go'){if(xmode!=='script'){warning("ADsafe violation.",that);}else if(adsafe_went||nexttoken.id!=='('||peek(0).id!=='(string)'||peek(0).value!==adsafe_id||peek(1).id!==','){error("ADsafe violation: go.",that);}
+adsafe_went=true;adsafe_may=false;}}}
+if(!option.evil&&(m==='eval'||m==='execScript')){warning('eval is evil.');}else if(option.safe){for(;;){if(banned[m]===true){warning("ADsafe restricted word '{a}'.",token,m);}
+if(typeof predefined[left.value]!=='boolean'||nexttoken.id==='('){break;}
+if(standard_member[m]===true){if(nexttoken.id==='.'){warning("ADsafe violation.",that);}
+break;}
+if(nexttoken.id!=='.'){warning("ADsafe violation.",that);break;}
+advance('.');token.left=that;token.right=m;that=token;m=identifier();if(typeof m==='string'){countMember(m);}}}
+return that;},160,true);infix('(',function(left,that){adjacent(prevtoken,token);nospace();var n=0,p=[];if(left){if(left.type==='(identifier)'){if(left.value.match(/^[A-Z]([A-Z0-9_$]*[a-z][A-Za-z0-9_$]*)?$/)){if(left.value!=='Number'&&left.value!=='String'&&left.value!=='Boolean'&&left.value!=='Date'){if(left.value==='Math'){warning("Math is not a function.",left);}else if(option.newcap){warning("Missing 'new' prefix when invoking a constructor.",left);}}}}else if(left.id==='.'){if(option.safe&&left.left.value==='Math'&&left.right==='random'){warning("ADsafe violation.",left);}}}
+if(nexttoken.id!==')'){for(;;){p[p.length]=parse(10);n+=1;if(nexttoken.id!==','){break;}
+comma();}}
+advance(')');if(option.immed&&left.id==='function'&&nexttoken.id!==')'){warning("Wrap the entire immediate function invocation in parens.",that);}
+nospace(prevtoken,token);if(typeof left==='object'){if(left.value==='parseInt'&&n===1){warning("Missing radix parameter.",left);}
+if(!option.evil){if(left.value==='eval'||left.value==='Function'||left.value==='execScript'){warning("eval is evil.",left);}else if(p[0]&&p[0].id==='(string)'&&(left.value==='setTimeout'||left.value==='setInterval')){warning("Implied eval is evil. Pass a function instead of a string.",left);}}
+if(!left.identifier&&left.id!=='.'&&left.id!=='['&&left.id!=='('&&left.id!=='&&'&&left.id!=='||'&&left.id!=='?'){warning("Bad invocation.",left);}}
+that.left=left;return that;},155,true).exps=true;prefix('(',function(){nospace();var v=parse(0);advance(')',this);nospace(prevtoken,token);if(option.immed&&v.id==='function'){if(nexttoken.id==='('){warning("Move the invocation into the parens that contain the function.",nexttoken);}else{warning("Do not wrap function literals in parens unless they are to be immediately invoked.",this);}}
+return v;});infix('[',function(left,that){nospace();var e=parse(0),s;if(e&&e.type==='(string)'){if(option.safe&&banned[e.value]===true){warning("ADsafe restricted word '{a}'.",that,e.value);}else if(!option.evil&&(e.value==='eval'||e.value==='execScript')){warning("eval is evil.",that);}else if(option.safe&&(e.value.charAt(0)==='_'||e.value.charAt(0)==='-')){warning("ADsafe restricted subscript '{a}'.",that,e.value);}
+countMember(e.value);if(!option.sub&&ix.test(e.value)){s=syntax[e.value];if(!s||!s.reserved){warning("['{a}'] is better written in dot notation.",e,e.value);}}}else if(!e||e.type!=='(number)'||e.value<0){if(option.safe){warning('ADsafe subscripting.');}}
+advance(']',that);nospace(prevtoken,token);that.left=left;that.right=e;return that;},160,true);prefix('[',function(){var b=token.line!==nexttoken.line;this.first=[];if(b){indent+=option.indent;if(nexttoken.from===indent+option.indent){indent+=option.indent;}}
+while(nexttoken.id!=='(end)'){while(nexttoken.id===','){warning("Extra comma.");advance(',');}
+if(nexttoken.id===']'){break;}
+if(b&&token.line!==nexttoken.line){indentation();}
+this.first.push(parse(10));if(nexttoken.id===','){comma();if(nexttoken.id===']'&&!option.es5){warning("Extra comma.",token);break;}}else{break;}}
+if(b){indent-=option.indent;indentation();}
+advance(']',this);return this;},160);function property_name(){var i=optionalidentifier(true);if(!i){if(nexttoken.id==='(string)'){i=nexttoken.value;advance();}else if(nexttoken.id==='(number)'){i=nexttoken.value.toString();advance();}}
+return i;}
+function functionparams(){var i,t=nexttoken,p=[];advance('(');nospace();if(nexttoken.id===')'){advance(')');nospace(prevtoken,token);return;}
+for(;;){i=identifier();p.push(i);addlabel(i,'parameter');if(nexttoken.id===','){comma();}else{advance(')',t);nospace(prevtoken,token);return p;}}}
+function doFunction(i){var f,s=scope;scope=Object.create(s);funct={'(name)':i||'"'+anonname+'"','(line)':nexttoken.line,'(context)':funct,'(breakage)':0,'(loopage)':0,'(scope)':scope};f=funct;token.funct=funct;functions.push(funct);if(i){addlabel(i,'function');}
+funct['(params)']=functionparams();block(false);scope=s;funct['(last)']=token.line;funct=funct['(context)'];return f;}
+(function(x){x.nud=function(){var b,f,i,j,p,seen={},t;b=token.line!==nexttoken.line;if(b){indent+=option.indent;if(nexttoken.from===indent+option.indent){indent+=option.indent;}}
+for(;;){if(nexttoken.id==='}'){break;}
+if(b){indentation();}
+if(nexttoken.value==='get'&&peek().id!==':'){advance('get');if(!option.es5){error("get/set are ES5 features.");}
+i=property_name();if(!i){error("Missing property name.");}
+t=nexttoken;adjacent(token,nexttoken);f=doFunction(i);if(funct['(loopage)']){warning("Don't make functions within a loop.",t);}
+p=f['(params)'];if(p){warning("Unexpected parameter '{a}' in get {b} function.",t,p[0],i);}
+adjacent(token,nexttoken);advance(',');indentation();advance('set');j=property_name();if(i!==j){error("Expected {a} and instead saw {b}.",token,i,j);}
+t=nexttoken;adjacent(token,nexttoken);f=doFunction(i);p=f['(params)'];if(!p||p.length!==1||p[0]!=='value'){warning("Expected (value) in set {a} function.",t,i);}}else{i=property_name();if(typeof i!=='string'){break;}
+advance(':');nonadjacent(token,nexttoken);parse(10);}
+if(seen[i]===true){warning("Duplicate member '{a}'.",nexttoken,i);}
+seen[i]=true;countMember(i);if(nexttoken.id===','){comma();if(nexttoken.id===','){warning("Extra comma.",token);}else if(nexttoken.id==='}'&&!option.es5){warning("Extra comma.",token);}}else{break;}}
+if(b){indent-=option.indent;indentation();}
+advance('}',this);return this;};x.fud=function(){error("Expected to see a statement and instead saw a block.",token);};}(delim('{')));function varstatement(prefix){var id,name,value;if(funct['(onevar)']&&option.onevar){warning("Too many var statements.");}else if(!funct['(global)']){funct['(onevar)']=true;}
+this.first=[];for(;;){nonadjacent(token,nexttoken);id=identifier();if(funct['(global)']&&predefined[id]===false){warning("Redefinition of '{a}'.",token,id);}
+addlabel(id,'unused');if(prefix){break;}
+name=token;this.first.push(token);if(nexttoken.id==='='){nonadjacent(token,nexttoken);advance('=');nonadjacent(token,nexttoken);if(nexttoken.id==='undefined'){warning("It is not necessary to initialize '{a}' to 'undefined'.",token,id);}
+if(peek(0).id==='='&&nexttoken.identifier){error("Variable {a} was not declared correctly.",nexttoken,nexttoken.value);}
+value=parse(0);name.first=value;}
+if(nexttoken.id!==','){break;}
+comma();}
+return this;}
+stmt('var',varstatement).exps=true;blockstmt('function',function(){if(inblock){warning("Function statements cannot be placed in blocks. Use a function expression or move the statement to the top of the outer function.",token);}
+var i=identifier();adjacent(token,nexttoken);addlabel(i,'unused');doFunction(i);if(nexttoken.id==='('&&nexttoken.line===token.line){error("Function statements are not invocable. Wrap the whole function invocation in parens.");}
+return this;});prefix('function',function(){var i=optionalidentifier();if(i){adjacent(token,nexttoken);}else{nonadjacent(token,nexttoken);}
+doFunction(i);if(funct['(loopage)']){warning("Don't make functions within a loop.");}
+return this;});blockstmt('if',function(){var t=nexttoken;advance('(');nonadjacent(this,t);nospace();parse(20);if(nexttoken.id==='='){warning("Expected a conditional expression and instead saw an assignment.");advance('=');parse(20);}
+advance(')',t);nospace(prevtoken,token);block(true);if(nexttoken.id==='else'){nonadjacent(token,nexttoken);advance('else');if(nexttoken.id==='if'||nexttoken.id==='switch'){statement(true);}else{block(true);}}
+return this;});blockstmt('try',function(){var b,e,s;if(option.adsafe){warning("ADsafe try violation.",this);}
+block(false);if(nexttoken.id==='catch'){advance('catch');nonadjacent(token,nexttoken);advance('(');s=scope;scope=Object.create(s);e=nexttoken.value;if(nexttoken.type!=='(identifier)'){warning("Expected an identifier and instead saw '{a}'.",nexttoken,e);}else{addlabel(e,'exception');}
+advance();advance(')');block(false);b=true;scope=s;}
+if(nexttoken.id==='finally'){advance('finally');block(false);return;}else if(!b){error("Expected '{a}' and instead saw '{b}'.",nexttoken,'catch',nexttoken.value);}
+return this;});blockstmt('while',function(){var t=nexttoken;funct['(breakage)']+=1;funct['(loopage)']+=1;advance('(');nonadjacent(this,t);nospace();parse(20);if(nexttoken.id==='='){warning("Expected a conditional expression and instead saw an assignment.");advance('=');parse(20);}
+advance(')',t);nospace(prevtoken,token);block(true);funct['(breakage)']-=1;funct['(loopage)']-=1;return this;}).labelled=true;reserve('with');blockstmt('switch',function(){var t=nexttoken,g=false;funct['(breakage)']+=1;advance('(');nonadjacent(this,t);nospace();this.condition=parse(20);advance(')',t);nospace(prevtoken,token);nonadjacent(token,nexttoken);t=nexttoken;advance('{');nonadjacent(token,nexttoken);indent+=option.indent;this.cases=[];for(;;){switch(nexttoken.id){case'case':switch(funct['(verb)']){case'break':case'case':case'continue':case'return':case'switch':case'throw':break;default:warning("Expected a 'break' statement before 'case'.",token);}
+indentation(-option.indent);advance('case');this.cases.push(parse(20));g=true;advance(':');funct['(verb)']='case';break;case'default':switch(funct['(verb)']){case'break':case'continue':case'return':case'throw':break;default:warning("Expected a 'break' statement before 'default'.",token);}
+indentation(-option.indent);advance('default');g=true;advance(':');break;case'}':indent-=option.indent;indentation();advance('}',t);if(this.cases.length===1||this.condition.id==='true'||this.condition.id==='false'){warning("This 'switch' should be an 'if'.",this);}
+funct['(breakage)']-=1;funct['(verb)']=undefined;return;case'(end)':error("Missing '{a}'.",nexttoken,'}');return;default:if(g){switch(token.id){case',':error("Each value should have its own case label.");return;case':':statements();break;default:error("Missing ':' on a case clause.",token);}}else{error("Expected '{a}' and instead saw '{b}'.",nexttoken,'case',nexttoken.value);}}}}).labelled=true;stmt('debugger',function(){if(!option.debug){warning("All 'debugger' statements should be removed.");}
+return this;}).exps=true;(function(){var x=stmt('do',function(){funct['(breakage)']+=1;funct['(loopage)']+=1;this.first=block(true);advance('while');var t=nexttoken;nonadjacent(token,t);advance('(');nospace();parse(20);if(nexttoken.id==='='){warning("Expected a conditional expression and instead saw an assignment.");advance('=');parse(20);}
+advance(')',t);nospace(prevtoken,token);funct['(breakage)']-=1;funct['(loopage)']-=1;return this;});x.labelled=true;x.exps=true;}());blockstmt('for',function(){var f=option.forin,s,t=nexttoken;funct['(breakage)']+=1;funct['(loopage)']+=1;advance('(');nonadjacent(this,t);nospace();if(peek(nexttoken.id==='var'?1:0).id==='in'){if(nexttoken.id==='var'){advance('var');varstatement(true);}else{switch(funct[nexttoken.value]){case'unused':funct[nexttoken.value]='var';break;case'var':break;default:warning("Bad for in variable '{a}'.",nexttoken,nexttoken.value);}
+advance();}
+advance('in');parse(20);advance(')',t);s=block(true);if(!f&&(s.length>1||typeof s[0]!=='object'||s[0].value!=='if')){warning("The body of a for in should be wrapped in an if statement to filter unwanted properties from the prototype.",this);}
+funct['(breakage)']-=1;funct['(loopage)']-=1;return this;}else{if(nexttoken.id!==';'){if(nexttoken.id==='var'){advance('var');varstatement();}else{for(;;){parse(0,'for');if(nexttoken.id!==','){break;}
+comma();}}}
+nolinebreak(token);advance(';');if(nexttoken.id!==';'){parse(20);if(nexttoken.id==='='){warning("Expected a conditional expression and instead saw an assignment.");advance('=');parse(20);}}
+nolinebreak(token);advance(';');if(nexttoken.id===';'){error("Expected '{a}' and instead saw '{b}'.",nexttoken,')',';');}
+if(nexttoken.id!==')'){for(;;){parse(0,'for');if(nexttoken.id!==','){break;}
+comma();}}
+advance(')',t);nospace(prevtoken,token);block(true);funct['(breakage)']-=1;funct['(loopage)']-=1;return this;}}).labelled=true;stmt('break',function(){var v=nexttoken.value;if(funct['(breakage)']===0){warning("Unexpected '{a}'.",nexttoken,this.value);}
+nolinebreak(this);if(nexttoken.id!==';'){if(token.line===nexttoken.line){if(funct[v]!=='label'){warning("'{a}' is not a statement label.",nexttoken,v);}else if(scope[v]!==funct){warning("'{a}' is out of scope.",nexttoken,v);}
+this.first=nexttoken;advance();}}
+reachable('break');return this;}).exps=true;stmt('continue',function(){var v=nexttoken.value;if(funct['(breakage)']===0){warning("Unexpected '{a}'.",nexttoken,this.value);}
+nolinebreak(this);if(nexttoken.id!==';'){if(token.line===nexttoken.line){if(funct[v]!=='label'){warning("'{a}' is not a statement label.",nexttoken,v);}else if(scope[v]!==funct){warning("'{a}' is out of scope.",nexttoken,v);}
+this.first=nexttoken;advance();}}else if(!funct['(loopage)']){warning("Unexpected '{a}'.",nexttoken,this.value);}
+reachable('continue');return this;}).exps=true;stmt('return',function(){nolinebreak(this);if(nexttoken.id==='(regexp)'){warning("Wrap the /regexp/ literal in parens to disambiguate the slash operator.");}
+if(nexttoken.id!==';'&&!nexttoken.reach){nonadjacent(token,nexttoken);this.first=parse(20);}
+reachable('return');return this;}).exps=true;stmt('throw',function(){nolinebreak(this);nonadjacent(token,nexttoken);this.first=parse(20);reachable('throw');return this;}).exps=true;reserve('void');reserve('class');reserve('const');reserve('enum');reserve('export');reserve('extends');reserve('import');reserve('super');reserve('let');reserve('yield');reserve('implements');reserve('interface');reserve('package');reserve('private');reserve('protected');reserve('public');reserve('static');function jsonValue(){function jsonObject(){var o={},t=nexttoken;advance('{');if(nexttoken.id!=='}'){for(;;){if(nexttoken.id==='(end)'){error("Missing '}' to match '{' from line {a}.",nexttoken,t.line);}else if(nexttoken.id==='}'){warning("Unexpected comma.",token);break;}else if(nexttoken.id===','){error("Unexpected comma.",nexttoken);}else if(nexttoken.id!=='(string)'){warning("Expected a string and instead saw {a}.",nexttoken,nexttoken.value);}
+if(o[nexttoken.value]===true){warning("Duplicate key '{a}'.",nexttoken,nexttoken.value);}else if(nexttoken.value==='__proto__'){warning("Stupid key '{a}'.",nexttoken,nexttoken.value);}else{o[nexttoken.value]=true;}
+advance();advance(':');jsonValue();if(nexttoken.id!==','){break;}
+advance(',');}}
+advance('}');}
+function jsonArray(){var t=nexttoken;advance('[');if(nexttoken.id!==']'){for(;;){if(nexttoken.id==='(end)'){error("Missing ']' to match '[' from line {a}.",nexttoken,t.line);}else if(nexttoken.id===']'){warning("Unexpected comma.",token);break;}else if(nexttoken.id===','){error("Unexpected comma.",nexttoken);}
+jsonValue();if(nexttoken.id!==','){break;}
+advance(',');}}
+advance(']');}
+switch(nexttoken.id){case'{':jsonObject();break;case'[':jsonArray();break;case'true':case'false':case'null':case'(number)':case'(string)':advance();break;case'-':advance('-');if(token.character!==nexttoken.from){warning("Unexpected space after '-'.",token);}
+adjacent(token,nexttoken);advance('(number)');break;default:error("Expected a JSON value.",nexttoken);}}
+var itself=function(s,o){var a,i;JSLINT.errors=[];predefined=Object.create(standard);if(o){a=o.predef;if(a instanceof Array){for(i=0;i<a.length;i+=1){predefined[a[i]]=true;}}
+if(o.adsafe){o.safe=true;}
+if(o.safe){o.browser=false;o.css=false;o.debug=false;o.devel=false;o.eqeqeq=true;o.evil=false;o.forin=false;o.nomen=true;o.on=false;o.rhino=false;o.safe=true;o.windows=false;o.strict=true;o.sub=false;o.undef=true;o.widget=false;predefined.Date=null;predefined['eval']=null;predefined.Function=null;predefined.Object=null;predefined.ADSAFE=false;predefined.lib=false;}
+option=o;}else{option={};}
+option.indent=option.indent||4;option.maxerr=option.maxerr||50;adsafe_id='';adsafe_may=false;adsafe_went=false;approved={};if(option.approved){for(i=0;i<option.approved.length;i+=1){approved[option.approved[i]]=option.approved[i];}}else{approved.test='test';}
+tab='';for(i=0;i<option.indent;i+=1){tab+=' ';}
+indent=1;global=Object.create(predefined);scope=global;funct={'(global)':true,'(name)':'(global)','(scope)':scope,'(breakage)':0,'(loopage)':0};functions=[funct];ids={};urls=[];src=false;xmode=false;stack=null;member={};membersOnly=null;implied={};inblock=false;lookahead=[];jsonmode=false;warnings=0;lex.init(s);prereg=true;strict_mode=false;prevtoken=token=nexttoken=syntax['(begin)'];assume();try{advance();if(nexttoken.value.charAt(0)==='<'){html();if(option.adsafe&&!adsafe_went){warning("ADsafe violation: Missing ADSAFE.go.",this);}}else{switch(nexttoken.id){case'{':case'[':option.laxbreak=true;jsonmode=true;jsonValue();break;case'@':case'*':case'#':case'.':case':':xmode='style';advance();if(token.id!=='@'||!nexttoken.identifier||nexttoken.value!=='charset'||token.line!==1||token.from!==1){error('A css file should begin with @charset "UTF-8";');}
+advance();if(nexttoken.type!=='(string)'&&nexttoken.value!=='UTF-8'){error('A css file should begin with @charset "UTF-8";');}
+advance();advance(';');styles();break;default:if(option.adsafe&&option.fragment){error("Expected '{a}' and instead saw '{b}'.",nexttoken,'<div>',nexttoken.value);}
+statements('lib');}}
+advance('(end)');}catch(e){if(e){JSLINT.errors.push({reason:e.message,line:e.line||nexttoken.line,character:e.character||nexttoken.from},null);}}
+return JSLINT.errors.length===0;};function is_array(o){return Object.prototype.toString.apply(o)==='[object Array]';}
+function to_array(o){var a=[],k;for(k in o){if(is_own(o,k)){a.push(k);}}
+return a;}
+itself.data=function(){var data={functions:[]},fu,globals,implieds=[],f,i,j,members=[],n,unused=[],v;if(itself.errors.length){data.errors=itself.errors;}
+if(jsonmode){data.json=true;}
+for(n in implied){if(is_own(implied,n)){implieds.push({name:n,line:implied[n]});}}
+if(implieds.length>0){data.implieds=implieds;}
+if(urls.length>0){data.urls=urls;}
+globals=to_array(scope);if(globals.length>0){data.globals=globals;}
+for(i=1;i<functions.length;i+=1){f=functions[i];fu={};for(j=0;j<functionicity.length;j+=1){fu[functionicity[j]]=[];}
+for(n in f){if(is_own(f,n)&&n.charAt(0)!=='('){v=f[n];if(is_array(fu[v])){fu[v].push(n);if(v==='unused'){unused.push({name:n,line:f['(line)'],'function':f['(name)']});}}}}
+for(j=0;j<functionicity.length;j+=1){if(fu[functionicity[j]].length===0){delete fu[functionicity[j]];}}
+fu.name=f['(name)'];fu.param=f['(params)'];fu.line=f['(line)'];fu.last=f['(last)'];data.functions.push(fu);}
+if(unused.length>0){data.unused=unused;}
+members=[];for(n in member){if(typeof member[n]==='number'){data.member=member;break;}}
+return data;};itself.report=function(option){var data=itself.data();var a=[],c,e,err,f,i,k,l,m='',n,o=[],s;function detail(h,array){var b,i,singularity;if(array){o.push('<div><i>'+h+'</i> ');array=array.sort();for(i=0;i<array.length;i+=1){if(array[i]!==singularity){singularity=array[i];o.push((b?', ':'')+singularity);b=true;}}
+o.push('</div>');}}
+if(data.errors||data.implieds||data.unused){err=true;o.push('<div id=errors><i>Error:</i>');if(data.errors){for(i=0;i<data.errors.length;i+=1){c=data.errors[i];if(c){e=c.evidence||'';o.push('<p>Problem'+(isFinite(c.line)?' at line '+
+c.line+' character '+c.character:'')+': '+c.reason.entityify()+'</p><p class=evidence>'+
+(e&&(e.length>80?e.slice(0,77)+'...':e).entityify())+'</p>');}}}
+if(data.implieds){s=[];for(i=0;i<data.implieds.length;i+=1){s[i]='<code>'+data.implieds[i].name+'</code>&nbsp;<i>'+
+data.implieds[i].line+'</i>';}
+o.push('<p><i>Implied global:</i> '+s.join(', ')+'</p>');}
+if(data.unused){s=[];for(i=0;i<data.unused.length;i+=1){s[i]='<code><u>'+data.unused[i].name+'</u></code>&nbsp;<i>'+
+data.unused[i].line+'</i> <code>'+
+data.unused[i]['function']+'</code>';}
+o.push('<p><i>Unused variable:</i> '+s.join(', ')+'</p>');}
+if(data.json){o.push('<p>JSON: bad.</p>');}
+o.push('</div>');}
+if(!option){o.push('<br><div id=functions>');if(data.urls){detail("URLs<br>",data.urls,'<br>');}
+if(xmode==='style'){o.push('<p>CSS.</p>');}else if(data.json&&!err){o.push('<p>JSON: good.</p>');}else if(data.globals){o.push('<div><i>Global</i> '+
+data.globals.sort().join(', ')+'</div>');}else{o.push('<div><i>No new global variables introduced.</i></div>');}
+for(i=0;i<data.functions.length;i+=1){f=data.functions[i];o.push('<br><div class=function><i>'+f.line+'-'+
+f.last+'</i> '+(f.name||'')+'('+
+(f.param?f.param.join(', '):'')+')</div>');detail('<big><b>Unused</b></big>',f.unused);detail('Closure',f.closure);detail('Variable',f['var']);detail('Exception',f.exception);detail('Outer',f.outer);detail('Global',f.global);detail('Label',f.label);}
+if(data.member){a=to_array(data.member);if(a.length){a=a.sort();m='<br><pre id=members>/*members ';l=10;for(i=0;i<a.length;i+=1){k=a[i];n=k.name();if(l+n.length>72){o.push(m+'<br>');m='    ';l=1;}
+l+=n.length+2;if(data.member[k]===1){n='<i>'+n+'</i>';}
+if(i<a.length-1){n+=', ';}
+m+=n;}
+o.push(m+'<br>*/</pre>');}
+o.push('</div>');}}
+return o.join('');};itself.jslint=itself;itself.edition='2010-04-06';return itself;}());(function(a){var e,i,input;if(!a[0]){print("Usage: jslint.js file.js");quit(1);}
+input=readFile(a[0]);if(!input){print("jslint: Couldn't open file '"+a[0]+"'.");quit(1);}
+if(!JSLINT(input,{bitwise:true,eqeqeq:true,immed:true,newcap:true,nomen:true,onevar:true,plusplus:true,regexp:true,rhino:true,undef:true,white:true})){for(i=0;i<JSLINT.errors.length;i+=1){e=JSLINT.errors[i];if(e){print('Lint at line '+e.line+' character '+
+e.character+': '+e.reason);print((e.evidence||'').replace(/^\s*(\S*(\s+\S+)*)\s*$/,"$1"));print('');}}
+quit(2);}else{print("jslint: No problems found in "+a[0]);quit();}}(arguments));
\ No newline at end of file
--- a/hooks.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/hooks.py	Fri Sep 10 14:14:42 2010 +0200
@@ -17,39 +17,79 @@
 from cubicweb.sobjects import notification as notifviews
 
 from cubes.vcsfile.entities import _MARKER
-from cubes.apycot.entities import bot_proxy, vcsrepo_apycot_info
+
+def start_period_tests(session, period):
+    rset = session.execute(
+        'Any TC,TCN,TCS,S WHERE '
+        'TC computed_start_mode %(sm)s, TC in_state S, S name "activated", '
+        'TC name TCN, TC start_rev_deps TCS', {'sm': period})
+    for i in xrange(rset.rowcount):
+        tc = rset.get_entity(i, 0)
+        for env in tc.iter_environments():
+            branch = tc.apycot_configuration(env).get('branch', _MARKER)
+            if branch is _MARKER:
+                # check every active branch if no branch specified
+                heads = env.repository.heads_rset().entities()
+            else:
+                head = env.repository.branch_head(branch)
+                if head is None:
+                    # No head found for this branch, skip
+                    continue
+                heads = (head,)
+            for head in heads:
+                # only start test if the config hasn't been executed against
+                # current branch head
+                if session.execute(
+                    'Any TE WHERE TE using_revision REV, REV eid %(rev)s, '
+                    'TE using_config TC, TC eid %(tc)s',
+                    {'rev': head.eid, 'tc': tc.eid}):
+                    # This revision has already been tested
+                    continue
+                tc.start(env, head.branch)
+
+
+def start_tests_if_match(session, revision, pe):
+    rql = ('Any TC, PE, PEN, TCN, TCS WHERE TC use_environment PE, REV eid %(rev)s,'
+           'PE name PEN, PE eid %(pe)s, PE vcs_path PEP, TC name TCN, '
+           'TC start_rev_deps TCS, '
+           'TC computed_start_mode %(sm)s, TC in_state S, S name "activated", '
+           'VC from_revision REV, '
+           'VC content_for VF, VF directory ~= PEP + "%"'
+           )
+    rset = session.execute(rql, {'sm': 'on new revision',
+                                 'rev': revision.eid,
+                                 'pe': pe.eid})
+    if rset:
+        branch = revision.branch
+        for i, row in enumerate(rset):
+            tc = rset.get_entity(i, 0)
+            pe = rset.get_entity(i, 1)
+            tc.start(pe, revision.branch)
+
+
+class ComputeStartModeHook(hook.Hook):
+    __regid__ = 'apycot.compute_start_mode'
+    __select__ = hook.Hook.__select__ & is_instance('TestConfig')
+    events = ('before_add_entity', 'before_update_entity')
+
+    def __call__(self):
+        if self.entity.get('start_mode') == u'inherited':
+            ComputeStartModeOp(self._cw, tc=self.entity)
+
+class ComputeStartModeOp(hook.Operation):
+    def precommit_event(self):
+        tc = self.tc
+        if tc.start_mode == u'inherited':
+            if tc.config_parent:
+                tc.set_attributes(start_mode=tc.config_parent.start_mode)
+            else:
+                msg = self.session._('Inherited start mode can only be used if the '
+                                     'configuration refines another one')
+                raise ValidationError(tc.eid, {'start_mode': msg})
+
 
 # automatic test launching #####################################################
 
-def start_test(session, period):
-    tostart = set()
-    rql = ('Any TC, PE, PEN, TCN, TCS '
-           'WHERE TC use_environment PE, PE name PEN, '
-           'TC start_mode %(sm)s, TC in_state S, S name "activated", '
-           'TC name TCN, TC start_rev_deps TCS')
-    for tc in session.execute(rql, {'sm': period}).entities():
-        env = tc.environment
-        if not env.repository:
-            tostart.add((env.name, tc.name, tc.start_rev_deps, None))
-        else:
-            # XXX check every active branch if no branch specified
-            branch = tc.apycot_configuration.get('branch', _MARKER)
-            head = env.repository.branch_head(branch)
-            if head is None:
-                # No head found (in the case of branch specific test config)
-                continue
-            # only start test if this config hasn't been
-            # executed against current branch head
-            if session.execute(
-                'Any TE WHERE TE using_revision REV, REV eid %(rev)s, '
-                'TE using_config TC, TC eid %(tc)s',
-                {'rev': head.eid, 'tc': tc.eid}):
-                # This rev have already been tested
-                continue
-            tostart.add((env.name, tc.name, tc.start_rev_deps, head.branch))
-    return tostart
-
-
 class ServerStartupHook(hook.Hook):
     """add looping task to automatically start tests
     """
@@ -60,61 +100,42 @@
             return
         # XXX use named args and inner functions to avoid referencing globals
         # which may cause reloading pb
-        def check_test_to_start(repo, datetime=datetime, start_test=start_test,
-                                StartTestsOp=StartTestsOp):
+        def check_test_to_start(repo, datetime=datetime,
+                                start_period_tests=start_period_tests):
             now = datetime.now()
-            tostart = set()
             session = repo.internal_session()
             try:
                 # XXX only start task for environment which have changed in the
                 # given interval
-                tostart |= start_test(session, 'hourly')
+                start_period_tests(session, 'hourly')
                 if now.hour == 1:
-                    tostart |= start_test(session, 'daily')
+                    start_period_tests(session, 'daily')
                 if now.isoweekday() == 1:
-                    tostart |= start_test(session, 'weekly')
+                    start_period_tests(session, 'weekly')
                 if now.day == 1:
-                    tostart |= start_test(session, 'monthly')
-                if tostart:
-                    StartTestsOp(session, tostart)
+                    start_period_tests(session, 'monthly')
                 session.commit()
             finally:
                 session.close()
         self.repo.looping_task(60*60, check_test_to_start, self.repo)
-        cleanupdelay = self.repo.config['test-exec-cleanup-delay']
-        if not cleanupdelay:
-            return # no auto cleanup
-        cleanupinterval = min(60*60*24, cleanupdelay)
-        def cleanup_test_execs(repo, delay=timedelta(seconds=cleanupdelay),
-                               now=datetime.now):
-            session = repo.internal_session()
-            mindate = now() - delay
-            try:
-                session.execute('DELETE TestExecution TE '
-                                'WHERE TE modification_date < %(min)s',
-                                {'min': mindate})
-                session.commit()
-            finally:
-                session.close()
-        self.repo.looping_task(cleanupinterval, cleanup_test_execs, self.repo)
 
 
-class StartTestAfterAddVersionContent(hook.Hook):
+class StartTestAfterAddRevision(hook.Hook):
     __regid__ = 'apycot.start_test_on_new_rev'
     __select__ = hook.Hook.__select__ & is_instance('Revision')
     events = ('after_add_entity',)
 
     def __call__(self):
         vcsrepo = self.entity.repository
-        for pe in vcsrepo.reverse_local_repository:
-            if not pe.vcs_path:
-                StartTestsOp(self._cw, set(
-                    (pe.name, tc.name, tc.start_rev_deps, self.entity.branch)
-                    for tc in pe.reverse_use_environment
-                    if tc.start_mode == 'on new revision'
-                    and tc.match_branch(self.entity.branch)))
-            else:
-                StartTestsIfMatchOp(self._cw, revision=self.entity, pe=pe)
+        for basepe in vcsrepo.reverse_local_repository:
+            for pe in basepe.iter_refinements():
+                if not pe.vcs_path:
+                    for tc in pe.reverse_use_environment:
+                        if tc.computed_start_mode == 'on new revision' \
+                               and tc.match_branch(pe, self.entity.branch):
+                            tc.start(pe, self.entity.branch)
+                else:
+                    start_tests_if_match(self._cw, revision=self.entity, pe=pe)
         # when a test is started, it may use some revision of dependency's
         # repositories that may not be already imported by vcsfile. So when it
         # try to create a link between the execution and the revision, it
@@ -126,67 +147,12 @@
             'CRI label ~= %(repo)s, CRI value %(cs)s',
             {'cs': self.entity.changeset,
              # safety belt in case of duplicated short changeset. XXX useful?
-             'repo': '%s:%s%%' % vcsrepo_apycot_info(vcsrepo)}).entities():
+             'repo': '%s:%s%%' % (vcsrepo.type, vcsrepo.source_url or vcsrepo.path)
+             }).entities():
             cri.check_result.set_relations(using_revision=self.entity)
             cri.delete()
 
 
-class StartTestsIfMatchOp(hook.Operation):
-
-    def precommit_event(self):
-        rql = ('Any TC, PE, PEN, TCN, TCS WHERE TC use_environment PE, REV eid %(rev)s,'
-               'PE name PEN, PE eid %(pe)s, PE vcs_path PEP, TC name TCN, '
-               'TC start_rev_deps TCS, '
-               'TC start_mode %(sm)s, TC in_state S, S name "activated", '
-               'VC from_revision REV, '
-               'VC content_for VF, VF directory ~= PEP + "%"'
-               )
-        rset = self.session.execute(rql, {'sm': 'on new revision',
-                                          'rev': self.revision.eid,
-                                          'pe': self.pe.eid})
-        if rset:
-            branch = self.revision.branch
-            testconfigs = set((row[2], row[3], row[4], self.revision.branch)
-                               for i, row in enumerate(rset)
-                               if rset.get_entity(i, 0).match_branch(branch))
-            StartTestsOp(self.session, testconfigs)
-
-
-class StartTestsOp(hook.SingleLastOperation):
-    def __init__(self, session, tests):
-        self.tests = tests
-        super(StartTestsOp, self).__init__(session)
-
-    def register(self, session):
-        previous = super(StartTestsOp, self).register(session)
-        if previous:
-            self.tests |= previous.tests
-
-    def postcommit_event(self):
-        self.session.repo.threaded_task(self.start_tests)
-
-    def start_tests(self):
-        session = self.session
-        config = session.vreg.config
-        try:
-            bot = bot_proxy(config, session.transaction_data)
-        except Exception, ex:
-            self.error('cant contact apycot bot: %s', ex)
-            # XXX create a TestExecution to report the attempt to launch test
-            return
-        # XXX make start_rev_deps=True configurable
-        full_pyro_id = ':%(pyro-ns-group)s.%(pyro-instance-id)s' % config
-        for envname, tcname, startrevdeps, branch in self.tests:
-            try:
-                bot.queue_task(envname, tcname,
-                               branch=branch, start_rev_deps=startrevdeps,
-                               cwinstid=full_pyro_id)
-            except Exception, ex:
-                self.error('cant start test %s: %s', tcname, ex)
-                # XXX create a TestExecution to report the attempt to launch test
-                return
-
-
 # notifications ################################################################
 
 class ExecStatusChangeView(notifviews.NotificationView):
@@ -206,7 +172,8 @@
     def subject(self):
         entity = self.cw_rset.get_entity(0, 0)
         changes = entity.status_changes()
-        testconfig = entity.configuration.dc_title()
+        testconfig = '%s/%s' % (entity.environment.name,
+                                entity.configuration.name)
         if entity.branch:
             testconfig = u'%s#%s' % (testconfig, entity.branch)
         if len(changes) == 1:
@@ -224,7 +191,8 @@
                     count[tostate] = 1
             resume = ', '.join('%s %s' % (num, self._cw._(state))
                                for state, num in count.items())
-            subject = self._cw._('%s now has %s') % (testconfig, resume)
+            subject = self._cw._('%(testconfig)s now has %(resume)s') % {
+                'testconfig': testconfig, 'resume': resume}
         return '[%s] %s' % (self._cw.vreg.config.appid, subject)
 
     def context(self):
@@ -243,7 +211,9 @@
             chgs.append('* ' + (chg % locals()))
         ctx['changelist'] = '\n'.join(chgs)
         vcschanges = []
-        for env in [entity.configuration.environment] + entity.configuration.dependencies():
+        tconfig = entity.configuration
+        environment = entity.environment
+        for env in [environment] + tconfig.dependencies(environment):
             if env.repository:
                 vcsrepo = env.repository
                 vcsrepochanges = []
@@ -272,10 +242,10 @@
         return ctx
 
 
-class ExecStatusChangeHook(hook.Hook):
-    __regid__ = 'apycot.send_reports_on_exec_status_change'
+class TestExecutionUpdatedHook(hook.Hook):
+    __regid__ = 'apycot.te.status_change'
     __select__ = hook.Hook.__select__ & is_instance('TestExecution')
-    events = ('after_update_entity',)
+    events = ('before_update_entity',)
 
     def __call__(self):
         # end of test execution : set endtime
@@ -285,6 +255,9 @@
                 'exstchange', self._cw, rset=entity.cw_rset, row=entity.cw_row,
                 col=entity.cw_col)
             notifhooks.RenderAndSendNotificationView(self._cw, view=view)
+        if 'execution_status' in entity.edited_attributes and \
+               entity.status == 'waiting execution':
+            entity['status'] = entity.execution_status
 
 
 try:
--- a/i18n/en.po	Wed Jul 28 12:10:03 2010 +0200
+++ b/i18n/en.po	Fri Sep 10 14:14:42 2010 +0200
@@ -5,8 +5,7 @@
 "Generated-By: pygettext.py 1.5\n"
 "Plural-Forms: nplurals=2; plural=(n > 1);\n"
 
-#, python-format
-msgid " <i>(from group %s)</i>"
+msgid " (inherited)"
 msgstr ""
 
 #, python-format
@@ -22,23 +21,12 @@
 msgstr ""
 
 #, python-format
-msgid "%i pending tasks"
-msgstr ""
-
-#, python-format
-msgid "%i running tasks"
-msgstr ""
-
-#, python-format
-msgid "%s now has %s"
+msgid "%(testconfig)s now has %(resume)s"
 msgstr ""
 
 msgid "* no change found in known repositories"
 msgstr ""
 
-msgid "Apycot bot status"
-msgstr ""
-
 msgid "Apycot executions"
 msgstr ""
 
@@ -48,15 +36,6 @@
 msgid "Available options:"
 msgstr ""
 
-msgid "Available preprocessors:"
-msgstr ""
-
-msgid "Bot is not available for the following reason:"
-msgstr ""
-
-msgid "Bot is up and available."
-msgstr ""
-
 msgid "CheckResult"
 msgstr "Check result"
 
@@ -69,25 +48,12 @@
 msgid "CheckResult_plural"
 msgstr "Check results"
 
-msgid "DEBUG"
-msgstr ""
-
-msgid "ERROR"
+#, python-format
+msgid "Execution of %(pe)s/%(config)s on %(date)s"
 msgstr ""
 
 #, python-format
-msgid "Execution of %(config)s on %(date)s"
-msgstr ""
-
-#, python-format
-msgid "Execution of %(config)s#%(branch)s"
-msgstr ""
-
-#, python-format
-msgid "Execution on %(date)s"
-msgstr ""
-
-msgid "FATAL"
+msgid "Execution of %(pe)s/%(config)s#%(branch)s"
 msgstr ""
 
 msgid ""
@@ -95,21 +61,11 @@
 "not loaded by the apycot bot."
 msgstr ""
 
-msgid "HIGH"
-msgstr ""
-
-msgid "INFO"
+msgid ""
+"Inherited start mode can only be used if the configuration refines another "
+"one"
 msgstr ""
 
-msgid "LOW"
-msgstr ""
-
-msgid "MEDIUM"
-msgstr ""
-
-msgid "Message Threshold"
-msgstr "Filter messages bellow:"
-
 msgid "New CheckResult"
 msgstr "xxx"
 
@@ -122,18 +78,12 @@
 msgid "New TestConfig"
 msgstr "New test configuration"
 
-msgid "New TestConfigGroup"
-msgstr "New apycot config group"
+msgid "New TestDependency"
+msgstr "New test dependency"
 
 msgid "New TestExecution"
 msgstr "xxx"
 
-msgid "No pending task"
-msgstr ""
-
-msgid "No running task"
-msgstr ""
-
 msgid "ProjectEnvironment"
 msgstr "Project environment"
 
@@ -167,28 +117,18 @@
 msgid "Test executions summary"
 msgstr ""
 
-msgctxt "inlined:ProjectEnvironment.use_environment.object"
-msgid "TestConfig"
-msgstr ""
-
 msgid "TestConfig"
 msgstr "Test configuration"
 
-msgid "TestConfig without checkers can not be executed."
-msgstr "Test Configuration without checkers can not be executed."
-
-# schema pot file, generated on 2009-01-14 12:58:20
-#
-# singular and plural forms for each entity type
-msgid "TestConfigGroup"
-msgstr "Test configuration group"
-
-msgid "TestConfigGroup_plural"
-msgstr "Test configuration groups"
-
 msgid "TestConfig_plural"
 msgstr "Test configurations"
 
+msgid "TestDependency"
+msgstr "Test dependency"
+
+msgid "TestDependency_plural"
+msgstr "Test dependencies"
+
 # schema pot file, generated on 2008-12-23 10:18:45
 #
 # singular and plural forms for each entity type
@@ -210,15 +150,12 @@
 msgid "This TestConfig"
 msgstr "This test configuration"
 
-msgid "This TestConfigGroup"
-msgstr "This test configuration group"
+msgid "This TestDependency"
+msgstr "This test dependency"
 
 msgid "This TestExecution"
 msgstr "This apycot execution"
 
-msgid "WARNING"
-msgstr ""
-
 msgid "activate"
 msgstr ""
 
@@ -234,12 +171,11 @@
 msgid "add TestConfig use_environment ProjectEnvironment object"
 msgstr "test configuration"
 
-msgctxt "inlined:ProjectEnvironment.use_environment.object"
-msgid "add a TestConfig"
-msgstr "test configuration"
+msgid "add TestDependency for_environment ProjectEnvironment object"
+msgstr "test dependency"
 
-msgid "apycot"
-msgstr ""
+msgid "add TestDependency for_testconfig TestConfig object"
+msgstr "test dependency"
 
 msgid "apycot configuration to register a project branch to test"
 msgstr ""
@@ -247,8 +183,14 @@
 msgid "apycot documentation"
 msgstr ""
 
-msgid "apycotbot"
-msgstr "apycot bot"
+msgid "apycot.pe.tab_config"
+msgstr "configuration"
+
+msgid "apycot.tc.tab_config"
+msgstr "configuration"
+
+msgid "apycot.te.tab_setup"
+msgstr "environment setup"
 
 msgid "apycottestresults_tab"
 msgstr "test reports"
@@ -262,13 +204,22 @@
 msgid "archivetestdir"
 msgstr "archive test directory"
 
+# subject and object forms for each relation type
+# (no object form for final or symmetric relation types)
+msgctxt "TestExecution"
+msgid "arguments"
+msgstr ""
+
 msgid "automatic test status"
 msgstr ""
 
 msgid "boxes_apycot.te.download_box"
+msgstr "download box"
+
+msgid "boxes_apycot.te.download_box_description"
 msgstr ""
 
-msgid "boxes_apycot.te.download_box_description"
+msgid "branch"
 msgstr ""
 
 # subject and object forms for each relation type
@@ -277,7 +228,8 @@
 msgid "branch"
 msgstr ""
 
-msgid "branch"
+#, python-format
+msgid "branch=\"%s\"<br/>"
 msgstr ""
 
 msgid "changes_rss_exec_button"
@@ -291,10 +243,6 @@
 msgid "check_config"
 msgstr "configuration"
 
-msgctxt "TestConfigGroup"
-msgid "check_config"
-msgstr "configuration"
-
 msgctxt "ProjectEnvironment"
 msgid "check_config"
 msgstr "configuration"
@@ -306,10 +254,6 @@
 msgid "check_environment"
 msgstr "environment"
 
-msgctxt "TestConfigGroup"
-msgid "check_environment"
-msgstr "environment"
-
 msgctxt "ProjectEnvironment"
 msgid "check_environment"
 msgstr "environment"
@@ -318,28 +262,17 @@
 msgid "check_environment"
 msgstr "environment"
 
-msgid "check_preprocessors"
-msgstr "preprocessors"
-
-msgctxt "ProjectEnvironment"
-msgid "check_preprocessors"
-msgstr "preprocessors"
-
 msgid "checker"
 msgstr ""
 
 msgid "checks"
 msgstr ""
 
-msgctxt "TestConfigGroup"
-msgid "checks"
+msgid "computed_start_mode"
 msgstr ""
 
 msgctxt "TestConfig"
-msgid "checks"
-msgstr ""
-
-msgid "comma separated list of checks to execute in this test config"
+msgid "computed_start_mode"
 msgstr ""
 
 msgid "contentnavigation_all_execution_subscribe_rss"
@@ -354,12 +287,6 @@
 msgid "contentnavigation_changes_execution_subscribe_rss_description"
 msgstr "icons to subscribe to status changes feed"
 
-msgid "contentnavigation_starttestform"
-msgstr "start tests"
-
-msgid "contentnavigation_starttestform_description"
-msgstr "section to start test for a test configuration"
-
 msgid ""
 "creating ProjectEnvironment (Project %(linkto)s has_apycot_environment "
 "ProjectEnvironment)"
@@ -375,6 +302,15 @@
 "s)"
 msgstr "creating test configuration for environment %(linkto)s"
 
+msgid ""
+"creating TestDependency (TestDependency for_environment ProjectEnvironment "
+"%(linkto)s)"
+msgstr "creating test dependency for environment %(linkto)s"
+
+msgid ""
+"creating TestDependency (TestDependency for_testconfig TestConfig %(linkto)s)"
+msgstr "creating test dependency for configuration %(linkto)s"
+
 msgid "daily"
 msgstr ""
 
@@ -397,10 +333,10 @@
 msgid "during_execution"
 msgstr "during execution"
 
-msgctxt "TestExecution"
 msgid "during_execution_object"
 msgstr "checks"
 
+msgctxt "TestExecution"
 msgid "during_execution_object"
 msgstr "checks"
 
@@ -428,15 +364,24 @@
 msgid "error"
 msgstr ""
 
-msgid "execution"
-msgstr ""
-
 msgid "execution information"
 msgstr ""
 
 msgid "execution priority"
 msgstr ""
 
+msgctxt "TestExecution"
+msgid "execution_log"
+msgstr "execution log"
+
+msgctxt "TestExecution"
+msgid "execution_of"
+msgstr "execution of"
+
+msgctxt "TestExecution"
+msgid "execution_status"
+msgstr "execution status"
+
 msgid "facets_apycot.tc.env"
 msgstr "test configuration environment facet"
 
@@ -504,6 +449,9 @@
 msgid "for_check"
 msgstr "for check"
 
+msgid "for_check_object"
+msgstr "about"
+
 msgctxt "CheckResult"
 msgid "for_check_object"
 msgstr "about"
@@ -512,8 +460,33 @@
 msgid "for_check_object"
 msgstr "about"
 
-msgid "for_check_object"
-msgstr "about"
+msgid "for_environment"
+msgstr "for environment"
+
+msgctxt "TestDependency"
+msgid "for_environment"
+msgstr "for environment"
+
+msgid "for_environment_object"
+msgstr "has dependency"
+
+msgctxt "ProjectEnvironment"
+msgid "for_environment_object"
+msgstr "has dependency"
+
+msgid "for_testconfig"
+msgstr "using configuration"
+
+msgctxt "TestDependency"
+msgid "for_testconfig"
+msgstr "using configuration"
+
+msgid "for_testconfig_object"
+msgstr "has dependency"
+
+msgctxt "TestConfig"
+msgid "for_testconfig_object"
+msgstr "has dependency"
 
 msgid "group results of execution of a specific test on a project"
 msgstr ""
@@ -525,31 +498,35 @@
 msgid "has_apycot_environment"
 msgstr "test environment"
 
-msgctxt "ProjectEnvironment"
 msgid "has_apycot_environment_object"
 msgstr "project"
 
+msgctxt "ProjectEnvironment"
 msgid "has_apycot_environment_object"
 msgstr "project"
 
 msgid "hourly"
 msgstr ""
 
-msgid "inherited from group"
+msgid "inherited"
 msgstr ""
 
-msgid "install type"
+msgid "inherited from"
+msgstr ""
+
+#, python-format
+msgid "inherited from %s"
 msgstr ""
 
 msgctxt "CWUser"
 msgid "interested_in"
 msgstr "interested in"
 
-msgctxt "TestConfig"
+msgctxt "ProjectEnvironment"
 msgid "interested_in_object"
 msgstr "has interest of"
 
-msgctxt "ProjectEnvironment"
+msgctxt "TestConfig"
 msgid "interested_in_object"
 msgstr "has interest of"
 
@@ -559,21 +536,15 @@
 msgid "killed"
 msgstr ""
 
-msgid ""
-"kind of version control system (vcs): hg (mercurial), svn (subversion), cvs "
-"(CVS), fs (file system, eg no version control)"
-msgstr ""
-
 msgctxt "CheckResultInfo"
 msgid "label"
 msgstr ""
 
-msgid "line"
+msgctxt "TestConfig"
+msgid "label"
 msgstr ""
 
-msgid ""
-"link to a vcsfile repository, may be used to replace vcs_repository_type / "
-"vcs_repository to have deeper integration."
+msgid "label for this configuration (useful when name isn't unique)"
 msgstr ""
 
 msgid ""
@@ -588,10 +559,10 @@
 msgid "local_repository"
 msgstr "repository entity"
 
-msgctxt "Repository"
 msgid "local_repository_object"
 msgstr "used by test config"
 
+msgctxt "Repository"
 msgid "local_repository_object"
 msgstr "used by test config"
 
@@ -613,22 +584,16 @@
 msgid "log_file"
 msgstr "archive"
 
+msgid "log_file_object"
+msgstr "archive containing environnement used by"
+
 msgctxt "File"
 msgid "log_file_object"
 msgstr "archive containing environnement used by"
 
-msgid "log_file_object"
-msgstr "archive containing environnement used by"
-
-msgid "logs"
-msgstr ""
-
 msgid "manual"
 msgstr ""
 
-msgid "message"
-msgstr ""
-
 msgid "missing"
 msgstr ""
 
@@ -638,15 +603,10 @@
 msgid "more information"
 msgstr ""
 
-msgctxt "CheckResult"
 msgid "name"
 msgstr ""
 
-msgctxt "TestConfig"
-msgid "name"
-msgstr ""
-
-msgctxt "TestConfigGroup"
+msgctxt "CheckResult"
 msgid "name"
 msgstr ""
 
@@ -654,16 +614,17 @@
 msgid "name"
 msgstr ""
 
+msgctxt "TestConfig"
 msgid "name"
 msgstr ""
 
 msgid "name for this configuration"
 msgstr ""
 
-msgid "name for this configuration group"
+msgid "name for this environment"
 msgstr ""
 
-msgid "name for this environment"
+msgid "narval.recipe.tab_executions"
 msgstr ""
 
 msgid "nc"
@@ -672,24 +633,6 @@
 msgid "need preprocessor"
 msgstr ""
 
-msgid "needs_checkout"
-msgstr "needs checkout"
-
-msgctxt "TestConfig"
-msgid "needs_checkout"
-msgstr "needs checkout"
-
-msgctxt "ProjectEnvironment"
-msgid "needs_checkout"
-msgstr "needs checkout"
-
-msgctxt "ProjectEnvironment"
-msgid "needs_checkout_object"
-msgstr "needed by"
-
-msgid "needs_checkout_object"
-msgstr "needed by"
-
 msgid "no"
 msgstr ""
 
@@ -708,41 +651,37 @@
 msgid "on new revision"
 msgstr ""
 
+msgid "on_environment"
+msgstr "on environment"
+
+msgctxt "TestDependency"
+msgid "on_environment"
+msgstr "on environment"
+
+msgid "on_environment_object"
+msgstr "dependency of"
+
+msgctxt "ProjectEnvironment"
+msgid "on_environment_object"
+msgstr "dependency of"
+
 msgid "option"
 msgstr ""
 
+msgctxt "TestExecution"
+msgid "options"
+msgstr ""
+
 msgid "partial"
 msgstr ""
 
-msgid "path or command"
-msgstr ""
-
-msgid "path or url to the vcs repository containing the project"
-msgstr ""
-
-msgid "path relative to the checkout directory to be considered by tests"
-msgstr ""
-
-msgid "pe_config"
-msgstr "configuration"
-
-msgid "pe_executions"
-msgstr "executions"
-
-msgid "permalink to this message"
-msgstr ""
-
 msgid "preprocessor"
 msgstr ""
 
 msgid "preprocessor/checker options (one per line)"
 msgstr ""
 
-msgid ""
-"preprocessors to use for this project for install, debian, build_doc... (one "
-"per line)"
-msgstr ""
-
+msgctxt "TestExecution"
 msgid "priority"
 msgstr ""
 
@@ -752,16 +691,27 @@
 msgid "project environment in which this test config should be launched"
 msgstr ""
 
-msgid ""
-"project's environments that should be installed from their repository to "
-"execute test with this configuration"
-msgstr ""
+msgid "refinement_of"
+msgstr "refinement of"
+
+msgctxt "ProjectEnvironment"
+msgid "refinement_of"
+msgstr "refinement of environment"
+
+msgctxt "TestConfig"
+msgid "refinement_of"
+msgstr "refinement of configuration"
 
-msgid "quick tests summary"
-msgstr ""
+msgid "refinement_of_object"
+msgstr "refined by"
 
-msgid "regroup some common configuration used by multiple projects"
-msgstr ""
+msgctxt "ProjectEnvironment"
+msgid "refinement_of_object"
+msgstr "refined by environment"
+
+msgctxt "TestConfig"
+msgid "refinement_of_object"
+msgstr "refined by configuration"
 
 msgid "relative path to the project into the repository"
 msgstr ""
@@ -789,9 +739,6 @@
 msgid "set up"
 msgstr ""
 
-msgid "severity"
-msgstr ""
-
 msgid ""
 "should tests for project environment depending on this test's environment be "
 "started when this test is automatically triggered"
@@ -803,9 +750,6 @@
 msgid "start test"
 msgstr ""
 
-msgid "start tests"
-msgstr ""
-
 msgid "start_mode"
 msgstr "start mode"
 
@@ -842,38 +786,16 @@
 msgid "status"
 msgstr ""
 
-msgctxt "TestConfig"
-msgid "subpath"
-msgstr ""
-
 msgid "success"
 msgstr ""
 
-msgid "task from another instance"
-msgstr ""
-
-msgid "tc_config"
-msgstr "configuration"
-
-msgid "tc_execution"
-msgstr "execution"
-
-msgid "te_setup"
-msgstr "environment setup"
-
-msgid "test(s) queued"
-msgstr ""
-
-msgid "this environment has no preprocessor configured."
+msgid "type"
 msgstr ""
 
 msgctxt "CheckResultInfo"
 msgid "type"
 msgstr ""
 
-msgid "type"
-msgstr ""
-
 msgid "use_environment"
 msgstr "environment"
 
@@ -881,29 +803,25 @@
 msgid "use_environment"
 msgstr "environment"
 
+msgid "use_environment_object"
+msgstr "test configuration"
+
 msgctxt "ProjectEnvironment"
 msgid "use_environment_object"
 msgstr "test configuration"
 
-msgid "use_environment_object"
-msgstr "test configuration"
-
-msgid "use_group"
-msgstr "use group"
-
-msgctxt "TestConfigGroup"
-msgid "use_group"
-msgstr "use group"
+msgid "use_recipe"
+msgstr "use recipe"
 
 msgctxt "TestConfig"
-msgid "use_group"
-msgstr "use group"
+msgid "use_recipe"
+msgstr "use recipe"
 
-msgctxt "TestConfigGroup"
-msgid "use_group_object"
+msgid "use_recipe_object"
 msgstr "used by"
 
-msgid "use_group_object"
+msgctxt "Recipe"
+msgid "use_recipe_object"
 msgstr "used by"
 
 msgid "using_config"
@@ -913,11 +831,25 @@
 msgid "using_config"
 msgstr "using configuration"
 
+msgid "using_config_object"
+msgstr "executions"
+
 msgctxt "TestConfig"
 msgid "using_config_object"
 msgstr "executions"
 
-msgid "using_config_object"
+msgid "using_environment"
+msgstr "for environment"
+
+msgctxt "TestExecution"
+msgid "using_environment"
+msgstr "for environment"
+
+msgid "using_environment_object"
+msgstr "executions"
+
+msgctxt "ProjectEnvironment"
+msgid "using_environment_object"
 msgstr "executions"
 
 msgid "using_revision"
@@ -927,17 +859,17 @@
 msgid "using_revision"
 msgstr "using revision"
 
+msgid "using_revision_object"
+msgstr "tested by"
+
 msgctxt "Revision"
 msgid "using_revision_object"
 msgstr "tested by"
 
-msgid "using_revision_object"
-msgstr "tested by"
-
-msgctxt "CheckResultInfo"
 msgid "value"
 msgstr ""
 
+msgctxt "CheckResultInfo"
 msgid "value"
 msgstr ""
 
@@ -951,19 +883,8 @@
 msgid "vcs_path"
 msgstr "vcs path"
 
-msgid "vcs_repository"
-msgstr "vcs repository"
-
-msgctxt "ProjectEnvironment"
-msgid "vcs_repository"
-msgstr "vcs repository"
-
-msgid "vcs_repository_type"
-msgstr "vcs type"
-
-msgctxt "ProjectEnvironment"
-msgid "vcs_repository_type"
-msgstr "vcs type"
+msgid "vcsfile repository holding the source code"
+msgstr ""
 
 msgid "version configuration"
 msgstr ""
@@ -971,6 +892,9 @@
 msgid "view details"
 msgstr ""
 
+msgid "waiting execution"
+msgstr ""
+
 msgid "weekly"
 msgstr ""
 
--- a/i18n/fr.po	Wed Jul 28 12:10:03 2010 +0200
+++ b/i18n/fr.po	Fri Sep 10 14:14:42 2010 +0200
@@ -5,15 +5,15 @@
 "PO-Revision-Date: 2009-02-16 12:11+0100\n"
 "Last-Translator: Logilab\n"
 "Language-Team: French <devel@logilab.fr.org>\n"
+"Language: fr\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
 "Content-Transfer-Encoding: 8bit\n"
 "Generated-By: pygettext.py 1.5\n"
 "Plural-Forms: nplurals=2; plural=(n > 1);\n"
 
-#, python-format
-msgid " <i>(from group %s)</i>"
-msgstr "<i>(du groupe %s)</i>"
+msgid " (inherited)"
+msgstr " (hérité)"
 
 #, python-format
 msgid "%(name)s status changed from %(fromstate)s to %(tostate)s"
@@ -28,23 +28,12 @@
 msgstr "%(nb)s vérifications exécutées en %(dur)s"
 
 #, python-format
-msgid "%i pending tasks"
-msgstr "%i tâches en attente d'éxecution"
-
-#, python-format
-msgid "%i running tasks"
-msgstr "%i tâches en cours d'exécution"
-
-#, python-format
-msgid "%s now has %s"
-msgstr "%s a maintenant %s"
+msgid "%(testconfig)s now has %(resume)s"
+msgstr "%(testconfig)s a maintenant %(resume)s"
 
 msgid "* no change found in known repositories"
 msgstr "pas de changement trouvé dans les entrepôts connus"
 
-msgid "Apycot bot status"
-msgstr "Apycot : état du bot"
-
 msgid "Apycot executions"
 msgstr "Apycot : éxécutions"
 
@@ -54,15 +43,6 @@
 msgid "Available options:"
 msgstr "Options disponibles :"
 
-msgid "Available preprocessors:"
-msgstr "Préprocesseurs disponibles :"
-
-msgid "Bot is not available for the following reason:"
-msgstr "Le bot n'est pas disponible :"
-
-msgid "Bot is up and available."
-msgstr "Le bot est lancé et disponible"
-
 msgid "CheckResult"
 msgstr "Résultat de vérification"
 
@@ -75,26 +55,13 @@
 msgid "CheckResult_plural"
 msgstr "Résultats de test"
 
-msgid "DEBUG"
-msgstr "DEBUG"
-
-msgid "ERROR"
-msgstr "ERREUR"
+#, python-format
+msgid "Execution of %(pe)s/%(config)s on %(date)s"
+msgstr "Exécution de %(pe)s/%(config)s le %(date)s"
 
 #, python-format
-msgid "Execution of %(config)s on %(date)s"
-msgstr "Exécution de la configuration %(config)s à %(date)s"
-
-#, python-format
-msgid "Execution of %(config)s#%(branch)s"
-msgstr "Exécution de la configuration %(config)s#%(branch)s"
-
-#, python-format
-msgid "Execution on %(date)s"
-msgstr "Exécution de %(date)s"
-
-msgid "FATAL"
-msgstr "FATAL"
+msgid "Execution of %(pe)s/%(config)s#%(branch)s"
+msgstr "Exécution de %(pe)s/%(config)s#%(branch)s"
 
 msgid ""
 "First notice that you may miss some information if you're using some plugin "
@@ -103,20 +70,12 @@
 "Tout d'abord notez que certaines informations peuvent manquer si vous "
 "utilisez des plugins qui ne sont pas chargés par le bot apycot."
 
-msgid "HIGH"
-msgstr "haute"
-
-msgid "INFO"
-msgstr "INFO"
-
-msgid "LOW"
-msgstr "basse"
-
-msgid "MEDIUM"
-msgstr "moyenne"
-
-msgid "Message Threshold"
-msgstr "Filtrer les messages inférieurs à :"
+msgid ""
+"Inherited start mode can only be used if the configuration refines another "
+"one"
+msgstr ""
+"Le mode de lancement 'hérité' ne peut être utilisé que si la configuration "
+"en raffine une autre"
 
 msgid "New CheckResult"
 msgstr "Nouveau résultat de vérification"
@@ -130,18 +89,12 @@
 msgid "New TestConfig"
 msgstr "Nouvelle configuration de test"
 
-msgid "New TestConfigGroup"
-msgstr "Nouveau groupe de configuration de test"
+msgid "New TestDependency"
+msgstr "Nouvelle dépendance de test"
 
 msgid "New TestExecution"
 msgstr "Nouvelle exécution"
 
-msgid "No pending task"
-msgstr "Pas de tâche en attente"
-
-msgid "No running task"
-msgstr "Pas de tâche en cours d'exécution"
-
 msgid "ProjectEnvironment"
 msgstr "Environnement de projet"
 
@@ -176,28 +129,18 @@
 msgid "Test executions summary"
 msgstr "Rapport d'exécution des tests"
 
-msgctxt "inlined:ProjectEnvironment.use_environment.object"
-msgid "TestConfig"
-msgstr "Configuration de test"
-
 msgid "TestConfig"
 msgstr "Configuration de test"
 
-msgid "TestConfig without checkers can not be executed."
-msgstr "Impossible d'éxécuter une configuration de test sans vérification."
-
-# schema pot file, generated on 2009-01-14 12:58:20
-#
-# singular and plural forms for each entity type
-msgid "TestConfigGroup"
-msgstr "Groupe de configuration de test"
-
-msgid "TestConfigGroup_plural"
-msgstr "Groupes de configuration de test"
-
 msgid "TestConfig_plural"
 msgstr "Configurations de test"
 
+msgid "TestDependency"
+msgstr "Dépendance de test"
+
+msgid "TestDependency_plural"
+msgstr "Dépendances de test"
+
 # schema pot file, generated on 2008-12-23 10:18:45
 #
 # singular and plural forms for each entity type
@@ -219,15 +162,12 @@
 msgid "This TestConfig"
 msgstr "Cette configuration de test"
 
-msgid "This TestConfigGroup"
-msgstr "Ce groupe de configuration"
+msgid "This TestDependency"
+msgstr "Cette dépendance de test"
 
 msgid "This TestExecution"
 msgstr "Cette éxecution"
 
-msgid "WARNING"
-msgstr "AVERTISSEMENT"
-
 msgid "activate"
 msgstr "activer"
 
@@ -243,12 +183,11 @@
 msgid "add TestConfig use_environment ProjectEnvironment object"
 msgstr "configuration de test"
 
-msgctxt "inlined:ProjectEnvironment.use_environment.object"
-msgid "add a TestConfig"
-msgstr "ajouter une configuration de test"
+msgid "add TestDependency for_environment ProjectEnvironment object"
+msgstr "dépendance de test"
 
-msgid "apycot"
-msgstr "apycot"
+msgid "add TestDependency for_testconfig TestConfig object"
+msgstr "dépendance de test"
 
 msgid "apycot configuration to register a project branch to test"
 msgstr "configuration de test enregistrant une branche de projet à tester"
@@ -256,8 +195,14 @@
 msgid "apycot documentation"
 msgstr "documentation apycot"
 
-msgid "apycotbot"
-msgstr "apycotbot"
+msgid "apycot.pe.tab_config"
+msgstr "configuration"
+
+msgid "apycot.tc.tab_config"
+msgstr "configuration"
+
+msgid "apycot.te.tab_setup"
+msgstr "mise en place de l'environnement"
 
 msgid "apycottestresults_tab"
 msgstr "rapport d'exécution des tests"
@@ -271,6 +216,12 @@
 msgid "archivetestdir"
 msgstr "Archiver le répertoire temporaire"
 
+# subject and object forms for each relation type
+# (no object form for final or symmetric relation types)
+msgctxt "TestExecution"
+msgid "arguments"
+msgstr "arguments"
+
 msgid "automatic test status"
 msgstr "état de test automatique"
 
@@ -282,14 +233,18 @@
 "Boîte permettant de télécharger une archive contenant l'environnement d'une "
 "éxécution de test."
 
+msgid "branch"
+msgstr "branche"
+
 # subject and object forms for each relation type
 # (no object form for final or symetric relation types)
 msgctxt "TestExecution"
 msgid "branch"
 msgstr "branche"
 
-msgid "branch"
-msgstr "branche"
+#, python-format
+msgid "branch=\"%s\"<br/>"
+msgstr "branche=\"%s\"<br/>"
 
 msgid "changes_rss_exec_button"
 msgstr "Statut"
@@ -302,10 +257,6 @@
 msgid "check_config"
 msgstr "configuration"
 
-msgctxt "TestConfigGroup"
-msgid "check_config"
-msgstr "configuration"
-
 msgctxt "ProjectEnvironment"
 msgid "check_config"
 msgstr "configuration"
@@ -317,10 +268,6 @@
 msgid "check_environment"
 msgstr "environnement"
 
-msgctxt "TestConfigGroup"
-msgid "check_environment"
-msgstr "environnement"
-
 msgctxt "ProjectEnvironment"
 msgid "check_environment"
 msgstr "environnement"
@@ -329,31 +276,18 @@
 msgid "check_environment"
 msgstr "environnement"
 
-msgid "check_preprocessors"
-msgstr "préprocesseurs"
-
-msgctxt "ProjectEnvironment"
-msgid "check_preprocessors"
-msgstr "préprocesseurs"
-
 msgid "checker"
 msgstr "vérification"
 
 msgid "checks"
 msgstr "vérifications"
 
-msgctxt "TestConfigGroup"
-msgid "checks"
-msgstr "vérifications"
+msgid "computed_start_mode"
+msgstr "mode de lancement"
 
 msgctxt "TestConfig"
-msgid "checks"
-msgstr "vérifications"
-
-msgid "comma separated list of checks to execute in this test config"
-msgstr ""
-"liste des vérifications à effectuer pour cette configuration, séparés par "
-"des virgules"
+msgid "computed_start_mode"
+msgstr "mode de lancement"
 
 msgid "contentnavigation_all_execution_subscribe_rss"
 msgstr "Souscrire à toutes les exécutions"
@@ -367,12 +301,6 @@
 msgid "contentnavigation_changes_execution_subscribe_rss_description"
 msgstr "Icones pour souscrire à tous les changements de status"
 
-msgid "contentnavigation_starttestform"
-msgstr "lancement d'une configuration de test"
-
-msgid "contentnavigation_starttestform_description"
-msgstr "section permettant de lancer l'exécution d'une configuration de test"
-
 msgid ""
 "creating ProjectEnvironment (Project %(linkto)s has_apycot_environment "
 "ProjectEnvironment)"
@@ -388,6 +316,15 @@
 "s)"
 msgstr "création d'une configuration de test pour l'environnement %(linkto)s"
 
+msgid ""
+"creating TestDependency (TestDependency for_environment ProjectEnvironment "
+"%(linkto)s)"
+msgstr "création d'une dépendance de test pour l'environnement %(linkto)s"
+
+msgid ""
+"creating TestDependency (TestDependency for_testconfig TestConfig %(linkto)s)"
+msgstr "création d'une dépendance de test pour la configuration %(linkto)s"
+
 msgid "daily"
 msgstr "journalier"
 
@@ -410,10 +347,10 @@
 msgid "during_execution"
 msgstr "durant l'exécution"
 
-msgctxt "TestExecution"
 msgid "during_execution_object"
 msgstr "vérifications"
 
+msgctxt "TestExecution"
 msgid "during_execution_object"
 msgstr "vérifications"
 
@@ -444,15 +381,24 @@
 msgid "error"
 msgstr "erreur"
 
-msgid "execution"
-msgstr "éxécution"
-
 msgid "execution information"
 msgstr "information sur l'exécution"
 
 msgid "execution priority"
 msgstr "priorité d'exécution"
 
+msgctxt "TestExecution"
+msgid "execution_log"
+msgstr "journal d'exécution"
+
+msgctxt "TestExecution"
+msgid "execution_of"
+msgstr "exécution de"
+
+msgctxt "TestExecution"
+msgid "execution_status"
+msgstr "état d'exécution"
+
 msgid "facets_apycot.tc.env"
 msgstr "facette environnement d'une configuration"
 
@@ -520,6 +466,9 @@
 msgid "for_check"
 msgstr "pour la vérification"
 
+msgid "for_check_object"
+msgstr "à propos de"
+
 msgctxt "CheckResult"
 msgid "for_check_object"
 msgstr "à propos de"
@@ -528,8 +477,33 @@
 msgid "for_check_object"
 msgstr "à propos de"
 
-msgid "for_check_object"
-msgstr "à propos de"
+msgid "for_environment"
+msgstr "pour l'environnement"
+
+msgctxt "TestDependency"
+msgid "for_environment"
+msgstr "pour l'environnement"
+
+msgid "for_environment_object"
+msgstr "dépendance de"
+
+msgctxt "ProjectEnvironment"
+msgid "for_environment_object"
+msgstr "dépendance de"
+
+msgid "for_testconfig"
+msgstr "avec la configuration"
+
+msgctxt "TestDependency"
+msgid "for_testconfig"
+msgstr "avec la configuration"
+
+msgid "for_testconfig_object"
+msgstr "a pour dépendance"
+
+msgctxt "TestConfig"
+msgid "for_testconfig_object"
+msgstr "a pour dépendance"
 
 msgid "group results of execution of a specific test on a project"
 msgstr "regroupe les résultats de l'exécution d'une vérification sur un projet"
@@ -541,31 +515,35 @@
 msgid "has_apycot_environment"
 msgstr "environnement de test"
 
-msgctxt "ProjectEnvironment"
 msgid "has_apycot_environment_object"
 msgstr "projet"
 
+msgctxt "ProjectEnvironment"
 msgid "has_apycot_environment_object"
 msgstr "projet"
 
 msgid "hourly"
 msgstr "chaque heure"
 
-msgid "inherited from group"
-msgstr "hérité du groupe"
+msgid "inherited"
+msgstr "hérité"
 
-msgid "install type"
-msgstr "type d'installation"
+msgid "inherited from"
+msgstr "hérité de"
+
+#, python-format
+msgid "inherited from %s"
+msgstr "hérité de %s"
 
 msgctxt "CWUser"
 msgid "interested_in"
 msgstr "intéressé par"
 
-msgctxt "TestConfig"
+msgctxt "ProjectEnvironment"
 msgid "interested_in_object"
 msgstr "intéresse"
 
-msgctxt "ProjectEnvironment"
+msgctxt "TestConfig"
 msgid "interested_in_object"
 msgstr "intéresse"
 
@@ -575,28 +553,16 @@
 msgid "killed"
 msgstr "tué"
 
-msgid ""
-"kind of version control system (vcs): hg (mercurial), svn (subversion), cvs "
-"(CVS), fs (file system, eg no version control)"
-msgstr ""
-"type de système de gestion de source (vcs) : hg (mercurial), svn "
-"(subversion), cvs (CVS), fs (système de fichiers, çàd pas de gestion de "
-"source)"
-
 msgctxt "CheckResultInfo"
 msgid "label"
 msgstr "libellé"
 
-msgid "line"
-msgstr "ligne"
+msgctxt "TestConfig"
+msgid "label"
+msgstr "label"
 
-msgid ""
-"link to a vcsfile repository, may be used to replace vcs_repository_type / "
-"vcs_repository to have deeper integration."
-msgstr ""
-"lien vers une entité entrepôt (cube vcsfile), peut-être utilisé pour "
-"remplacer les attributs de la configuration caractérisant l'entrepôt et "
-"bénéficier d'une meilleure intégration."
+msgid "label for this configuration (useful when name isn't unique)"
+msgstr "titre pour cette configuration (utile quand son nom n'est pas unique)"
 
 msgid ""
 "link to revision which has been used in the test environment for "
@@ -612,10 +578,10 @@
 msgid "local_repository"
 msgstr "entrepôt local"
 
-msgctxt "Repository"
 msgid "local_repository_object"
 msgstr "utilisé par"
 
+msgctxt "Repository"
 msgid "local_repository_object"
 msgstr "utilisé par"
 
@@ -637,22 +603,16 @@
 msgid "log_file"
 msgstr "archive"
 
+msgid "log_file_object"
+msgstr "archive contenant l'environnement de"
+
 msgctxt "File"
 msgid "log_file_object"
 msgstr "archive containing the environment of"
 
-msgid "log_file_object"
-msgstr "archive containing the environment of"
-
-msgid "logs"
-msgstr "messages"
-
 msgid "manual"
 msgstr "manuel"
 
-msgid "message"
-msgstr "message"
-
 msgid "missing"
 msgstr "manquant"
 
@@ -662,15 +622,10 @@
 msgid "more information"
 msgstr "plus d'information"
 
-msgctxt "CheckResult"
 msgid "name"
 msgstr "nom"
 
-msgctxt "TestConfig"
-msgid "name"
-msgstr "nom"
-
-msgctxt "TestConfigGroup"
+msgctxt "CheckResult"
 msgid "name"
 msgstr "nom"
 
@@ -678,42 +633,25 @@
 msgid "name"
 msgstr "nom"
 
+msgctxt "TestConfig"
 msgid "name"
 msgstr "nom"
 
 msgid "name for this configuration"
 msgstr "nom pour cette configuration"
 
-msgid "name for this configuration group"
-msgstr "nom pour ce grouop"
-
 msgid "name for this environment"
 msgstr "nom pour cette environnement"
 
+msgid "narval.recipe.tab_executions"
+msgstr "exécutions"
+
 msgid "nc"
 msgstr "-"
 
 msgid "need preprocessor"
 msgstr "nécessite le préprocesseur"
 
-msgid "needs_checkout"
-msgstr "dépend de"
-
-msgctxt "TestConfig"
-msgid "needs_checkout"
-msgstr "dépend de"
-
-msgctxt "ProjectEnvironment"
-msgid "needs_checkout"
-msgstr "dépend de"
-
-msgctxt "ProjectEnvironment"
-msgid "needs_checkout_object"
-msgstr "dépendance de"
-
-msgid "needs_checkout_object"
-msgstr "dépendance de"
-
 msgid "no"
 msgstr "non"
 
@@ -732,44 +670,37 @@
 msgid "on new revision"
 msgstr "à chaque nouvelle révision"
 
+msgid "on_environment"
+msgstr "sur l'environnement"
+
+msgctxt "TestDependency"
+msgid "on_environment"
+msgstr "sur l'environnement"
+
+msgid "on_environment_object"
+msgstr "dépendance de"
+
+msgctxt "ProjectEnvironment"
+msgid "on_environment_object"
+msgstr "dépendance de"
+
 msgid "option"
 msgstr "option"
 
+msgctxt "TestExecution"
+msgid "options"
+msgstr "options"
+
 msgid "partial"
 msgstr "partiel"
 
-msgid "path or command"
-msgstr "chemin ou commande"
-
-msgid "path or url to the vcs repository containing the project"
-msgstr "url ou chemin vers l'entrepôt contenant ce projet"
-
-msgid "path relative to the checkout directory to be considered by tests"
-msgstr ""
-"chemin relatif à l'entrepôt d'un répertoire auquel restreindre les tests"
-
-msgid "pe_config"
-msgstr "configuration"
-
-msgid "pe_executions"
-msgstr "exécutions"
-
-msgid "permalink to this message"
-msgstr "Lien permanent vers ce message"
-
 msgid "preprocessor"
 msgstr "préprocesseur"
 
 msgid "preprocessor/checker options (one per line)"
 msgstr "options des préprocesseurs / vérificateurs (une par ligne)"
 
-msgid ""
-"preprocessors to use for this project for install, debian, build_doc... (one "
-"per line)"
-msgstr ""
-"préprocesseurs à utiliser pour ce projet (install, debian, build_doc...), un "
-"par ligne"
-
+msgctxt "TestExecution"
 msgid "priority"
 msgstr "priorité"
 
@@ -779,18 +710,27 @@
 msgid "project environment in which this test config should be launched"
 msgstr "environnement dans lequel ce test doit être lancé"
 
-msgid ""
-"project's environments that should be installed from their repository to "
-"execute test with this configuration"
-msgstr ""
-"environnements devant être installés depuis leur entrepôt pour lancer des "
-"tests avec cette configuration"
+msgid "refinement_of"
+msgstr "raffine"
+
+msgctxt "ProjectEnvironment"
+msgid "refinement_of"
+msgstr "raffine l'environnement"
+
+msgctxt "TestConfig"
+msgid "refinement_of"
+msgstr "raffine la configuration"
 
-msgid "quick tests summary"
-msgstr "rapport de test"
+msgid "refinement_of_object"
+msgstr "raffiné par"
 
-msgid "regroup some common configuration used by multiple projects"
-msgstr "regroupe de la configuration commune à plusieurs projets"
+msgctxt "ProjectEnvironment"
+msgid "refinement_of_object"
+msgstr "raffiné par"
+
+msgctxt "TestConfig"
+msgid "refinement_of_object"
+msgstr "raffiné par"
 
 msgid "relative path to the project into the repository"
 msgstr "chemin relatif vers le projet dans l'entrepot"
@@ -818,9 +758,6 @@
 msgid "set up"
 msgstr "mise en place de l'environnement"
 
-msgid "severity"
-msgstr "sévérité"
-
 msgid ""
 "should tests for project environment depending on this test's environment be "
 "started when this test is automatically triggered"
@@ -834,9 +771,6 @@
 msgid "start test"
 msgstr "lancer l'exécution"
 
-msgid "start tests"
-msgstr "lancer les tests"
-
 msgid "start_mode"
 msgstr "mode de lancement"
 
@@ -873,35 +807,13 @@
 msgid "status"
 msgstr "état"
 
-msgctxt "TestConfig"
-msgid "subpath"
-msgstr "sous-chemin"
-
 msgid "success"
 msgstr "succès"
 
-msgid "task from another instance"
-msgstr "tâche appartenant à une autre instance"
-
-msgid "tc_config"
-msgstr "configuration"
-
-msgid "tc_execution"
-msgstr "exécutions"
-
-msgid "te_setup"
-msgstr "mise en place de l'environnement"
-
-msgid "test(s) queued"
-msgstr "test(s) ajouté à la file d'attente"
-
-msgid "this environment has no preprocessor configured."
-msgstr "cet environnement n'a pas de préprocesseur configuré"
-
-msgctxt "CheckResultInfo"
 msgid "type"
 msgstr "type"
 
+msgctxt "CheckResultInfo"
 msgid "type"
 msgstr "type"
 
@@ -912,29 +824,25 @@
 msgid "use_environment"
 msgstr "environnement"
 
+msgid "use_environment_object"
+msgstr "configuration de test"
+
 msgctxt "ProjectEnvironment"
 msgid "use_environment_object"
 msgstr "configuration de test"
 
-msgid "use_environment_object"
-msgstr "configuration de test"
-
-msgid "use_group"
-msgstr "utilise le groupe"
-
-msgctxt "TestConfigGroup"
-msgid "use_group"
-msgstr "utilise le groupe"
+msgid "use_recipe"
+msgstr "utilise la recette"
 
 msgctxt "TestConfig"
-msgid "use_group"
-msgstr "utilise le groupe"
+msgid "use_recipe"
+msgstr "utilise la recette"
 
-msgctxt "TestConfigGroup"
-msgid "use_group_object"
+msgid "use_recipe_object"
 msgstr "utilisé par"
 
-msgid "use_group_object"
+msgctxt "Recipe"
+msgid "use_recipe_object"
 msgstr "utilisé par"
 
 msgid "using_config"
@@ -944,12 +852,26 @@
 msgid "using_config"
 msgstr "configuration"
 
+msgid "using_config_object"
+msgstr "exécutions"
+
 msgctxt "TestConfig"
 msgid "using_config_object"
 msgstr "exécutions"
 
-msgid "using_config_object"
-msgstr "exécutions"
+msgid "using_environment"
+msgstr "pour l'environnement"
+
+msgctxt "TestExecution"
+msgid "using_environment"
+msgstr "pour l'environnement"
+
+msgid "using_environment_object"
+msgstr "exécutions"
+
+msgctxt "ProjectEnvironment"
+msgid "using_environment_object"
+msgstr "exécutions"
 
 msgid "using_revision"
 msgstr "avec la révision"
@@ -958,17 +880,17 @@
 msgid "using_revision"
 msgstr "avec la révision"
 
+msgid "using_revision_object"
+msgstr "rapport de test"
+
 msgctxt "Revision"
 msgid "using_revision_object"
 msgstr "rapport de test"
 
-msgid "using_revision_object"
-msgstr "rapport de test"
-
-msgctxt "CheckResultInfo"
 msgid "value"
 msgstr "valeur"
 
+msgctxt "CheckResultInfo"
 msgid "value"
 msgstr "valeur"
 
@@ -982,19 +904,8 @@
 msgid "vcs_path"
 msgstr "chemin relatif dans le système de gestion de source"
 
-msgid "vcs_repository"
-msgstr "url du système de gestion de source"
-
-msgctxt "ProjectEnvironment"
-msgid "vcs_repository"
-msgstr "url du système de gestion de source"
-
-msgid "vcs_repository_type"
-msgstr "type système de gestion de source"
-
-msgctxt "ProjectEnvironment"
-msgid "vcs_repository_type"
-msgstr "type système de gestion de source"
+msgid "vcsfile repository holding the source code"
+msgstr "entrepôt de gestion des sources de l'environnement"
 
 msgid "version configuration"
 msgstr "configuration de version"
@@ -1002,6 +913,9 @@
 msgid "view details"
 msgstr "voir les détails"
 
+msgid "waiting execution"
+msgstr "En attente d'exécution"
+
 msgid "weekly"
 msgstr "chaque semaine"
 
--- a/logformat.py	Wed Jul 28 12:10:03 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,121 +0,0 @@
-# coding: utf-8
-"""utilities to turn apycot raw logs into nice html reports
-
-nn:organization: Logilab
-:copyright: 2008-2009 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: General Public License version 2 - http://www.gnu.org/licenses
-"""
-__docformat__ = "restructuredtext en"
-_ = unicode
-
-import logging
-from cubicweb.utils import make_uid
-
-
-REVERSE_SEVERITIES = {
-    logging.DEBUG :   _('DEBUG'),
-    logging.INFO :    _('INFO'),
-    logging.WARNING : _('WARNING'),
-    logging.ERROR :   _('ERROR'),
-    logging.FATAL :   _('FATAL')
-    }
-
-
-def log_to_html(req, data, w):
-    """format apycot logs data to an html table
-
-    log are encoded by the apycotbot in the following format for each record:
-
-      encodedmsg = u'%s\t%s\t%s\t%s<br/>' % (severity, path, line,
-                                             xml_escape(msg))
-
-    """
-    # XXX severity filter / link to viewcvs or similar
-    req.add_js('cubes.apycot.js')
-    req.add_js('jquery.tablesorter.js')
-    req.add_css('cubicweb.tablesorter.css')
-
-
-    req.add_onload('$("select.log_filter").val("%s").change();'
-                    %  req.form.get('log_level', 'Info'))
-    w(u'<form>')
-    w(u'<label>%s</label>' % _(u'Message Threshold'))
-    w(u'<select class="log_filter" onchange="filter_log(this.options[this.selectedIndex].value)">')
-    for level in ('Debug', 'Info', 'Warning', 'Error', 'Fatal'):
-        w('<option value="%s">%s</option>' % (level, _(level)))
-    w(u'</select>')
-    w(u'</form>')
-
-
-    w(u'<table class="listing apylog">')
-    w(u'<tr><th>%s</th><th>%s</th><th>%s</th><th>%s</th></tr>' % (
-        req._('severity'), req._('path or command'), req._('line'), req._('message')))
-    for msg_idx, record in enumerate(data.split('<br/>')):
-        record = record.strip()
-        if not record:
-            continue
-        try:
-            severity, path, line, msg = record.split('\t', 3)
-        except:
-            req.warning('badly formated apycot log %s' % record)
-            continue
-        severityname = REVERSE_SEVERITIES[int(severity)]
-        log_msg_id = 'log_msg_%i' % msg_idx
-        w(u'<tr class="log%s" id="%s">' % (severityname.capitalize(),
-                                           log_msg_id))
-        w(u'<td class="logSeverity" cubicweb:sortvalue="%s">' % severity)
-        data = {
-            'severity': req._(REVERSE_SEVERITIES[int(severity)]),
-            'title': _('permalink to this message'),
-            'msg_id': log_msg_id,
-        }
-        w(u'''<a class="internallink" href="javascript:;" title="%(title)s" '''
-          u'''onclick="document.location.hash='%(msg_id)s';">&#182;</a>'''
-          u'''&#160;%(severity)s''' % data)
-        w(u'</td>')
-        w(u'<td class="logPath">%s</td>' % (path or u'&#160;'))
-        w(u'<td class="logLine">%s</td>' % (line or u'&#160;'))
-
-        w(u'<td class="logMsg">')
-
-
-        SNIP_OVER = 7
-
-        lines = msg.splitlines()
-        if len(lines) <= SNIP_OVER:
-            w(u'<pre class="rawtext">%s</pre>' % msg)
-        else:
-            # The make_uid argument have not specific meaning here.
-            div_snip_id = make_uid(u'log_snip_')
-            div_full_id = make_uid(u'log_full_')
-            divs_id = (div_snip_id, div_full_id)
-            snip = u'\n'.join((lines[0],
-                               lines[1],
-                               u'  ...',
-                               u'    %i more lines [double click to expand]' % (len(lines)-4),
-                               u'  ...',
-                               lines[-2],
-                               lines[-1]))
-
-
-            divs = (
-                (div_snip_id, snip, u'expand', "class='collapsed'"),
-                (div_full_id, msg,  u'collapse', "class='hidden'")
-            )
-            for div_id, content, button, h_class in divs:
-                text = _(button)
-                js   = u"toggleVisibility('%s'); toggleVisibility('%s');" % divs_id
-
-                w(u'<div id="%s" %s>' % (div_id, h_class))
-                w(u'<pre class="raw_test" ondblclick="javascript: %s" '
-                   'title="%s" style="display: block;">' % (js, text))
-                w(u'%s' % content)
-                w(u'</pre>')
-                w(u'</div>')
-
-
-        w(u'</td>')
-
-        w(u'</tr>\n')
-    w(u'</table>')
--- a/migration/0.2.0_Any.py	Wed Jul 28 12:10:03 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,31 +0,0 @@
-add_entity('CWGroup', name=_('apycot'))
-add_entity('CWUser', login=_('apycotbot'), upassword='apycot')
-rql('SET U in_group G WHERE U login "apycotbot", G name "apycot"')
-rql('SET U in_group G WHERE U login "apycotbot", G name "guests"')
-
-add_entity_type('ApycotConfigGroup')
-
-add_relation_definition('ProjectApycotConfig', 'in_state', 'State')
-add_relation_definition('TrInfo', 'wf_info_for', 'ProjectApycotConfig')
-
-for etype in ('ProjectApycotConfig',
-              'ApycotExecution', 'CheckResult',
-              'CheckResultLog', 'CheckResultInfo'):
-    synchronize_eschema(etype)
-for rtype in ('has_apycot_config', 'for_check', 'using_config', 'during_execution'):
-    if rtype in schema:
-        synchronize_rschema(rtype)
-
-
-activatedeid = add_state(_('activated'), 'ProjectApycotConfig', initial=True)
-deactivatedeid = add_state(_('deactivated'), 'ProjectApycotConfig')
-add_transition(_('deactivate'), 'ProjectApycotConfig',
-               (activatedeid,), deactivatedeid,
-               requiredgroups=('managers',))
-add_transition(_('activate'), 'ProjectApycotConfig',
-               (deactivatedeid,), activatedeid,
-               requiredgroups=('managers',))
-checkpoint()
-
-rql('SET X in_state S WHERE X is ProjectApycotConfig, S name "activated", S state_of ET, X is ET')
-checkpoint()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/migration/2.0.0_Any.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,139 @@
+import os
+
+add_relation_type('refinement_of') # other stuff will be implicitly added by add_cube side effect...
+
+repo.system_source.dbhelper.set_null_allowed(
+    session.pool['system'], 'cw_TestConfig', 'cw_start_mode', 'varchar(15)', True)
+rename_entity_type('TestConfigGroup', 'TestConfig',
+                   attrs=('name', 'check_config', 'check_environment'))
+rql('SET X refinement_of Y WHERE X use_group Y', ask_confirm=False) # XXX pb if use multiple groups
+sql("UPDATE cw_TestConfig SET cw_start_mode='inherited' WHERE cw_start_mode IS NULL")
+repo.system_source.dbhelper.set_null_allowed(
+    session.pool['system'], 'cw_TestConfig', 'cw_start_mode', 'varchar(15)', False)
+
+
+drop_relation_type('use_group')
+
+add_cube('narval')
+
+
+rql('SET X start_mode "manual" WHERE NOT X refinement_of Y, X start_mode "inherited"')
+rql('SET X computed_start_mode SM WHERE X start_mode SM, NOT X start_mode "inherited"')
+
+add_attribute('TestConfig', 'label') # except this one, dunno why
+
+for tc in rqliter('Any X, XS, XC WHERE X is TestConfig, NOT X subpath NULL, '
+                  'X subpath XS, X check_config XC').entities():
+    if tc.check_config:
+        config = '%s\nsubpath=%s' % (tc.check_config, tc.subpath)
+    else:
+        config = 'subpath=%s' % tc.subpath
+    tc.set_attributes(check_config=config)
+drop_attribute('TestConfig' , 'subpath')
+
+process_script(os.path.join(os.path.dirname(__file__), 'create_recipes.py'))
+
+drop_attribute('TestConfig', 'checks')
+
+for pe, tc, dpe in rqliter('Any PE,TC,DPE WHERE TC use_environment PE, TC needs_checkout DPE',
+                           ask_confirm=True):
+    rql('INSERT TestDependency X: X for_environment PE, X for_testconfig TC, X on_environment DPE'
+        ' WHERE TC eid %(tc)s, PE eid %(pe)s, DPE eid %(dpe)s',
+        {'tc': tc, 'pe': pe, 'dpe': dpe}, ask_confirm=False)
+# PE needs_checkout PE when on PE's project dependency/recommend not needed,
+# only backport TC needs_checkout PE if apycot as forge extension
+if 'Project' not in schema:
+    for pe, tc, dpe in rqliter('Any PE,TC,DPE WHERE TC use_environment PE, PE needs_checkout DPE',
+                               ask_confirm=True):
+        rql('INSERT TestDependency X: X for_environment PE, X for_testconfig TC, X on_environment DPE'
+            ' WHERE TC eid %(tc)s, PE eid %(pe)s, DPE eid %(dpe)s',
+            {'tc': tc, 'pe': pe, 'dpe': dpe}, ask_confirm=False)
+
+drop_relation_type('needs_checkout')
+
+for pe in rqliter('Any PE,PEPP,PEC WHERE PE is ProjectEnvironment,'
+                  'PE check_preprocessors PEPP, PE check_config PEC',
+                  ask_confirm=True).entities():
+    if pe.check_preprocessors:
+        if pe.check_config:
+            cfg = u'%s\n%s' % (pe.check_config, pe.check_preprocessors)
+        else:
+            cfg = pe.check_preprocessors
+        pe.set_attributes(check_config=cfg)
+
+drop_attribute('ProjectEnvironment' , 'check_preprocessors')
+
+rql('SET X execution_status "done" WHERE X is TestExecution')
+
+# remove vcs_repository and vcs_repository_type (the local_repository
+# relation is used instead) What's done:
+# * only 'mercurial' and 'subversion' are automatically migrated (see
+# vcsfile cube)
+# * no change if 'local_repository' exists, use vcs_repository and
+# vcs_repository_type otherwise.
+
+repo_type_mapping = {'hg': u'mercurial',
+                     'svn': u'subversion'}
+
+for project in rqliter('Any P, R, T, L WHERE P is ProjectEnvironment, '
+                       'P vcs_repository R, P vcs_repository_type T, P local_repository L?',
+                       ask_confirm=False).entities():
+    repo_type = repo_type_mapping.get(project.vcs_repository_type)
+    if repo_type is None:
+        print ('WARNING: "%s" repository type is no more managed, '
+               'you have to manually upgrade %s.' % (project.vcs_repository_type,
+                                                     project))
+        continue
+    if not project.local_repository and project.vcs_repository:
+        reporset = rql('Repository X WHERE X path %(repo)s OR X source_url %(repo)s',
+                       {'repo': project.vcs_repository}, ask_confirm=False)
+        if reporset:
+            project.set_relations(local_repository=reporset.get_entity(0, 0))
+        elif project.vcs_repository.startswith('/'):
+            rql('INSERT Repository R: R type %(type)s, R path %(path)s, '
+                'P local_repository R WHERE P eid %(eid)s',
+                {'type': repo_type, 'path': project.vcs_repository,
+                 'eid': project.eid}, ask_confirm=True)
+        else:
+            rql('INSERT Repository R: R type %(type)s, R source_url %(path)s, '
+                'P local_repository R WHERE P eid %(eid)s',
+                {'type': repo_type, 'path': project.vcs_repository,
+                 'eid': project.eid}, ask_confirm=True)
+commit()
+
+drop_attribute('ProjectEnvironment', 'vcs_repository')
+drop_attribute('ProjectEnvironment', 'vcs_repository_type')
+
+sync_schema_props_perms('TestExecution')
+sync_schema_props_perms('TestConfig')
+sync_schema_props_perms('use_environment')
+sync_schema_props_perms('local_repository')
+for ertype in ('CheckResult', 'CheckResultInfo', 'Repository'):
+    sync_schema_props_perms(ertype, syncprops=False)
+
+rql('DELETE TestExecution TE WHERE TE branch NULL')
+
+rset = rql('Any TC, N WHERE NOT TC use_recipe R, TC name N', ask_confirm=False)
+if rset:
+    print '*TestConfig* that do not have a *Recipe*:'
+    print '\n'.join('  - %s (eid: %s)' % (entity.name, entity.eid)
+                    for entity in rset.entities())
+
+
+rset = rql('Any R WHERE R is Repository, R path NULL', ask_confirm=False)
+if rset:
+    print '*Repository* that do not have a *path*:'
+    print '\n'.join('  - %s (eid: %s)' % (entity.dc_title(), entity.eid)
+                    for entity in rset.entities())
+
+rset = rql('Any R WHERE R is Repository, R source_url NULL', ask_confirm=False)
+if rset:
+    print '*Repository* that do not have a *source_url*:'
+    print '\n'.join('  - %s (eid: %s)' % (entity.dc_title(), entity.eid)
+                    for entity in rset.entities())
+
+rset = rql('Any PE, N WHERE NOT PE local_repository LR, PE name N', ask_confirm=False)
+if rset:
+    print '*ProjectEnvironment* that do not have a *local_repository*:'
+    print '\n'.join('  - %s (eid: %s)' % (entity.name, entity.eid)
+                    for entity in rset.entities())
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/migration/create_recipes.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,15 @@
+from cubes.apycot import recipes
+
+rql('DELETE Recipe R', ask_confirm=False)
+
+for recipe in dir(recipes):
+    if recipe.startswith('create_'):
+        print recipe
+        getattr(recipes, recipe)(session)
+
+# define new recipes for current test config
+rql('SET X use_recipe Y WHERE X name "quick", Y name "apycot.recipe.quick"')
+rql('SET X use_recipe Y WHERE X name "package", Y name "apycot.recipe.debian"')
+rql('SET X use_recipe Y WHERE X name "full", Y name "apycot.recipe.full"')
+
+commit()
--- a/migration/postcreate.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/migration/postcreate.py	Fri Sep 10 14:14:42 2010 +0200
@@ -8,9 +8,8 @@
 wf.add_transition(_('activate'), deactivated, activated,
                   requiredgroups=('managers',))
 
-create_entity('Bookmark', title=_('quick tests summary'),
-           path=u'view?rql=Any+X%2CXN+ORDERBY+XN+WHERE+X+is+TestConfig%2C+X+name+XN%2C+X+in_state+S%2C+S+name+%22activated%22&vid=summary')
-
+from cubes.apycot import recipes
+recipes.create_quick_recipe(session)
 
 if not config['pyro-server']:
     config.global_set_option('pyro-server', True)
--- a/migration/precreate.py	Wed Jul 28 12:10:03 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-create_entity('CWGroup', name=_('apycot'))
-create_entity('CWUser', login=_('apycotbot'), upassword='apycot')
-rql('SET U in_group G WHERE U login "apycotbot", G name "apycot"')
-rql('SET U in_group G WHERE U login "apycotbot", G name "guests"')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/narval/apycot.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,148 @@
+import os
+
+# setup the import machinery, necessary in dev environment
+from cubes import narval, apycot
+
+import apycotlib
+from apycotlib import atest, writer
+
+from narvalbot.prototype import EXPR_CONTEXT, action, input, output
+from narvalbot.elements import FilePath
+
+def _apycot_cleanup(plan):
+    if hasattr(plan, 'apycot'):
+        if plan.state != 'done':
+            plan.apycot.global_status = apycotlib.ERROR
+        plan.apycot.clean()
+    # XXX clean_env
+
+def _make_test_runner_action(runner):
+    @output('coverage_data', 'isinstance(elmt, FilePath)', optional=True)
+    @apycotlib.apycotaction(runner, 'INSTALLED in elmt.done_steps')
+    def act_runtests(inputs, runner=runner):
+        from apycotlib.checkers import python # trigger registration
+        test = inputs['apycot']
+        options = inputs.get('options')
+        checker, status = test.run_checker(runner, options=options)
+        if options.get('pycoverage') and hasattr(checker, 'coverage_data'):
+            return {'coverage_data': FilePath(checker.coverage_data,
+                                              type='coverage-data')}
+        return {}
+    return act_runtests
+
+
+STEP_CHECKEDOUT, STEP_INSTALLED, STEP_COVERED, STEP_DEBIANPKG = range(4)
+
+EXPR_CONTEXT['Test'] = atest.Test
+EXPR_CONTEXT['CHECKEDOUT'] = STEP_CHECKEDOUT
+EXPR_CONTEXT['INSTALLED']  = STEP_INSTALLED
+EXPR_CONTEXT['COVERED']    = STEP_COVERED
+EXPR_CONTEXT['DEBIANPKG']  = STEP_DEBIANPKG
+
+# base actions #################################################################
+
+@input('plan', 'isinstance(elmt, Plan)')
+@output('apycot',)
+@output('projectenv',)
+@action('apycot.init', finalizer=_apycot_cleanup)
+def act_apycot_init(inputs):
+    plan = inputs['plan']
+    w = writer.TestDataWriter(plan.memory.cnxh, plan.cwplan.eid)
+    test = plan.apycot = atest.Test(plan.cwplan, w, plan.options)
+    test.setup()
+    test.done_steps = set()
+    os.chdir(test.tmpdir)#XXX
+    return {'apycot': test, 'projectenv': test.environment}
+
+
+@input('apycot', 'isinstance(elmt, Test)')
+@output('projectenvs', list=True)
+@action('apycot.get_dependancies')
+def act_get_dependancies(inputs):
+    """Checkout repository for a test configuration"""
+    tconfig = inputs['apycot'].tconfig
+    environment = inputs['apycot'].environment
+    return {'projectenvs': [environment] + tconfig.dependencies(environment)}
+
+
+@input('apycot', 'isinstance(elmt, Test)')
+@input('projectenv', 'getattr(elmt, "__regid__", None) == "ProjectEnvironment"')
+@action('apycot.checkout')
+def act_checkout(inputs):
+    """Checkout repository for a test configuration"""
+    test = inputs['apycot']
+    test.checkout(inputs['projectenv'])
+    test.done_steps.add(STEP_CHECKEDOUT)
+    return {}
+
+
+@input('projectenv', 'getattr(elmt, "__regid__", None) == "ProjectEnvironment"')
+@input('apycot', 'isinstance(elmt, Test)', 'CHECKEDOUT in elmt.done_steps')
+@action('apycot.install')
+def act_install(inputs):
+    from apycotlib import preprocessors
+    test = inputs['apycot']
+    test.call_preprocessor('install', inputs['projectenv'])
+    if inputs['projectenv'] is test.environment: # XXX
+        test.done_steps.add(STEP_INSTALLED)
+    return {}
+
+
+# checker actions ##############################################################
+
+# @input('projectenv', 'getattr(elmt, "__regid__", None) == "ProjectEnvironment"')
+# @output('changes-file', 'isinstance(elmt, FilePath)', 'elmt.type == "debian.changes"', list=True)
+# @apycotlib.apycotaction('lgp.build', 'CHECKEDOUT in elmt.done_steps')
+# def act_lgp_build(inputs):
+#     test = inputs['apycot']
+#     checker, status = test.run_checker('lgp.build', inputs.get('options'))
+#     changes = []
+#     for distrib, dchanges in checker.debian_changes.iteritems():
+#         for change in dchanges:
+#             changes.append(FilePath(dchange, type="debian.changes", distribution=distrib))
+#     if status:
+#         test.done_steps.add(STEP_DEBIANPKG)
+#     return {'changes-file': changes}
+
+
+act_pyunit = _make_test_runner_action('pyunit')
+act_pytest = _make_test_runner_action('pytest')
+
+
+@apycotlib.apycotaction('pylint', 'INSTALLED in elmt.done_steps')
+def act_pylint(inputs):
+    from apycotlib.checkers import python # trigger registration
+    test = inputs['apycot']
+    checker, status = test.run_checker('pylint', inputs.get('options'))
+    return {}
+
+
+@input('coverage_data', 'isinstance(elmt, FilePath)', 'elmt.type == "coverage-data"')
+@apycotlib.apycotaction('pycoverage')
+def act_pycoverage(inputs):
+    from apycotlib.checkers import python # trigger registration
+    test = inputs['apycot']
+    options = {'coverage_data': inputs['coverage_data'].path}
+    checker, status = test.run_checker('pycoverage', options=options)
+    return {}
+
+
+# @apycotlib.apycotaction('lgp.check')
+# def act_lgp_check(inputs):
+#     test = inputs['apycot']
+#     checker, status = test.run_checker('lgp.check', inputs.get('options'))
+#     return {}
+
+
+# @apycotlib.apycotaction('piuparts', 'DEBIANPKG in elmt.done_steps')
+# def act_piuparts(inputs):
+#     test = inputs['apycot']
+#     checker, status = test.run_checker('piuparts', inputs.get('options'))
+#     return {}
+
+
+# @apycotlib.apycotaction('lintian', 'DEBIANPKG in elmt.done_steps')
+# def act_lintian(inputs):
+#     test = inputs['apycot']
+#     checker, status = test.run_checker('lintian', inputs.get('options'))
+#     return {}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/recipes.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,60 @@
+def create_quick_recipe(session):
+    recipe = session.create_entity('Recipe', name=u'apycot.recipe.quick')
+    step1 = recipe.add_step(u'action', u'apycot.init', initial=True)
+    step2 = recipe.add_step(u'action', u'apycot.get_dependancies')
+    recipe.add_transition(step1, step2)
+    step3 = recipe.add_step(u'action', u'apycot.checkout', for_each=u'projectenv')
+    recipe.add_transition(step2, step3)
+    step4 = recipe.add_step(u'action', u'apycot.install', for_each=u'projectenv')
+    recipe.add_transition(step3, step4)
+    step5 = recipe.add_step(u'action', u'apycot.pyunit', final=True)
+    recipe.add_transition(step4, step5)
+
+def create_full_recipe(session):
+    recipe = session.create_entity('Recipe', name=u'apycot.recipe.full')
+    step1 = recipe.add_step(u'action', u'apycot.init', initial=True)
+    step2 = recipe.add_step(u'action', u'apycot.get_dependancies')
+    step3 = recipe.add_step(u'action', u'apycot.checkout', for_each=u'projectenv')
+    step4 = recipe.add_step(u'action', u'apycot.install', for_each=u'projectenv')
+    step5bis = recipe.add_step(u'action', u'apycot.python.pylint')
+    step5 = recipe.add_step(u'action', u'apycot.pyunit',
+                            arguments=u'Options({"pycoverage":True})')
+    step6 = recipe.add_step(u'action', u'apycot.pycoverage')
+    step7 = recipe.add_step(u'action', u'basic.noop', final=True)
+    recipe.add_transition(step1, step2)
+    recipe.add_transition(step2, step3)
+    recipe.add_transition(step3, step4)
+    recipe.add_transition(step4, (step5, step5bis))
+    recipe.add_transition(step5, step6)
+    recipe.add_transition((step5bis, step6), step7)
+    return recipe
+
+# def create_debian_recipe(session):
+#     recipe = session.create_entity('Recipe', name=u'apycot.recipe.debian')
+#     step1 = recipe.add_step(u'action', u'apycot.init', initial=True)
+#     step2 = recipe.add_step(u'action', u'apycot.checkout')
+#     step3 = recipe.add_step(u'action', u'apycot.package.lgp_check')
+#     step3bis = recipe.add_step(u'action', u'apycot.debian.lgp_build')
+#     step4 = recipe.add_step(u'action', u'apycot.debian.lintian')
+#     step5 = recipe.add_step(u'action', u'basic.noop', final=True)
+#     recipe.add_transition(step1, step2)
+#     recipe.add_transition(step2, (step3, step3bis))
+#     recipe.add_transition(step3bis, step4)
+#     recipe.add_transition((step3, step4), step5)
+#     return recipe
+
+# def create_experimental_recipe(session):
+#     recipe = session.create_entity('Recipe', name=u'apycot.recipe.experimental')
+#     step1 = recipe.add_step(u'recipe', u'apycot.recipe.debian', initial=True)
+#     step2 = recipe.add_step(u'action', u'apycot.debian.upload')
+#     step3 = recipe.add_step(u'action', u'apycot.debian.publish', final=True)
+#     recipe.add_transition(step1, step2)
+#     recipe.add_transition(step2, step3)
+#     return recipe
+
+# def create_publish_recipe(session):
+#     # XXX
+#     # copy/upload from logilab-experimental to logilab-public
+#     # example: ldi upload logilab-public /path/to/experimental/repo/dists/*/*.changes
+#     recipe = session.create_entity('Recipe', name=u'apycot.recipe.publish')
+#     return recipe
--- a/schema.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/schema.py	Fri Sep 10 14:14:42 2010 +0200
@@ -7,13 +7,17 @@
 __docformat__ = "restructuredtext en"
 _ = unicode
 
-from yams.buildobjs import (EntityType, RelationDefinition, String, Int,
-                            Datetime, Boolean)
+from yams.buildobjs import (EntityType, RelationDefinition, SubjectRelation,
+                            String, Int, Datetime, Boolean)
 from yams.reader import context
 
 from cubicweb.schema import (RQLConstraint, RRQLExpression, RQLUniqueConstraint,
                              make_workflowable)
 
+from cubes.narval.schema import IMMUTABLE_ATTR_PERMS, Plan
+
+from cubes.vcsfile.schema import Repository
+
 # tracker extension ############################################################
 
 if 'Project' in context.defined:
@@ -55,17 +59,21 @@
 # configuration entities and relations #########################################
 
 def post_build_callback(schema):
-    if not 'apycot' in schema['Repository'].permissions['read']:
-        schema['Repository'].permissions['read'] += ('apycot',)
+    if not 'narval' in schema['Repository'].permissions['read']:
+        schema['Repository'].permissions['read'] += ('narval',)
+    for attr in ('path', 'local_cache'):
+        rdef = schema['Repository'].rdef(attr)
+        if not 'narval' in rdef.permissions['read']:
+            rdef.permissions['read'] += ('narval',)
     # XXX has to be in post_build_callback since forge overwrite File
     # permissions
-    if not 'apycot' in schema['File'].permissions['add']:
-        schema['File'].permissions['add'] += ('apycot',)
+    if not 'narval' in schema['File'].permissions['add']:
+        schema['File'].permissions['add'] += ('narval',)
 
 
 class ProjectEnvironment(EntityType):
     __permissions__ = {
-        'read':   ('managers', 'users', 'guests', 'apycot'),
+        'read':   ('managers', 'users', 'guests', 'narval'),
         'add':    CONF_WRITE_GROUPS,
         'update': CONF_WRITE_GROUPS,
         'delete': CONF_WRITE_GROUPS,
@@ -75,25 +83,12 @@
         required=True, unique=True, maxsize=128,
         description=_('name for this environment')
         )
-
-    vcs_repository_type = String(
-        required=True,
-        vocabulary=(u'hg', u'svn', u'cvs', u'fs'),
-        description=_('kind of version control system (vcs): hg (mercurial), '
-                      'svn (subversion), cvs (CVS), fs (file system, eg no '
-                      'version control)')
-        )
-    vcs_repository = String(
-        required=True,
-        description=_('path or url to the vcs repository containing the project')
-        )
+    # XXX used?
     vcs_path = String(
         description=_('relative path to the project into the repository')
         )
-
-    check_preprocessors = String(
-        description=_('preprocessors to use for this project for install, '
-                      'debian, build_doc... (one per line)'),
+    check_config = String(
+        description=_('preprocessor/checker options (one per line)'),
         fulltextindexed=True
         )
     check_environment = String(
@@ -101,27 +96,49 @@
                       'environment (one per line)'),
         fulltextindexed=True
         )
-    check_config = String(
-        description=_('preprocessor/checker options (one per line)'),
-        fulltextindexed=True
-        )
 
 
-class TestConfigGroup(EntityType):
-    """regroup some common configuration used by multiple projects"""
+class TestConfig(EntityType):
+    """apycot configuration to register a project branch to test"""
     __permissions__ = {
-        'read':   ('managers', 'users', 'guests', 'apycot'),
+        'read':   ('managers', 'users', 'guests', 'narval'),
         'add':    CONF_WRITE_GROUPS,
         'update': CONF_WRITE_GROUPS,
         'delete': CONF_WRITE_GROUPS,
         }
+    name = String(
+        required=True, indexed=True, maxsize=128,
+        description=_('name for this configuration'),
+        constraints=[RQLUniqueConstraint('S name N, S use_environment E, '
+                                         'Y use_environment E, Y name N', 'Y')]
+        )
+    label = String(
+        unique=True, maxsize=128,
+        description=_('label for this configuration (useful when name isn\'t unique)'),
+        )
 
-    name = String(
-        required=True, unique=True, maxsize=128,
-        description=_('name for this configuration group'),
+    start_mode = String(
+        required=True,
+        vocabulary=(_('inherited'), _('manual'), _('on new revision'),
+                    _('hourly'), _('daily'),_('weekly'), _('monthly')),
+        default='manual',
+        description=_('when this test config should be started')
         )
-    checks = String(
-        description=_('comma separated list of checks to execute in this test config'),
+    computed_start_mode = String(
+        # when this test config should be started (automatically computed from
+        # start_mode
+        indexed=True,
+        vocabulary=(_('manual'), _('on new revision'),
+                    _('hourly'), _('daily'),_('weekly'), _('monthly')),
+        default='manual',
+        )
+    start_rev_deps = Boolean(
+        description=_("should tests for project environment depending on this "
+                      "test's environment be started when this test is "
+                      "automatically triggered")
+        )
+    check_config = String(
+        description=_('preprocessor/checker options (one per line)'),
         fulltextindexed=True
         )
     check_environment = String(
@@ -129,42 +146,9 @@
                       'environment (one per line)'),
         fulltextindexed=True
         )
-    check_config = String(
-        description=_('preprocessor/checker options (one per line)'),
-        fulltextindexed=True
-        )
-
-
-class TestConfig(TestConfigGroup):
-    """apycot configuration to register a project branch to test"""
-    __permissions__ = {
-        'read':   ('managers', 'users', 'guests', 'apycot'),
-        'add':    CONF_WRITE_GROUPS,
-        'update': CONF_WRITE_GROUPS,
-        'delete': CONF_WRITE_GROUPS,
-        }
-    name = String(
-        override=True, required=True, maxsize=128, indexed=True,
-        description=_('name for this configuration'),
-        constraints=[RQLUniqueConstraint('S name N, S use_environment E, '
-                                         'Y use_environment E, Y name N', 'Y')]
-        )
-
-    start_mode = String(
-        required=True, indexed=True,
-        vocabulary=(_('manual'), _('on new revision'),
-                    _('hourly'), _('daily'),_('weekly'), _('monthly')),
-        default='manual',
-        description=_('when this test config should be started')
-        )
-    start_rev_deps = Boolean(
-        default=False,
-        description=_("should tests for project environment depending on this "
-                      "test's environment be started when this test is "
-                      "automatically triggered")
-        )
-    # simply use 'branch=XXX' in check_config field. Get back documentation
-    # before to remove code below
+    # simply use 'branch=XXX'/'subpath=XXX' in check_config field. Restore the
+    # documentation before removing the code below
+    #
     # vcs_branch  = String(
     #     description=_('branch to use for test\'s checkout. In case of '
     #                   'subversion repository, this should be the relative path '
@@ -172,93 +156,127 @@
     #                   'considered then).'),
     #     maxsize=256
     #     )
-    subpath = String(
-        description=_('path relative to the checkout directory to be considered by tests')
-        )
+    # subpath = String(
+    #     description=_('path relative to the checkout directory to be considered by tests')
+    #     )
 
 make_workflowable(TestConfig, in_state_descr=_('automatic test status'))
 
 
+class TestDependency(EntityType):
+    __permissions__ = {
+        'read':   ('managers', 'users', 'guests', 'narval'),
+        'add':    CONF_WRITE_GROUPS,
+        'update': CONF_WRITE_GROUPS,
+        'delete': CONF_WRITE_GROUPS,
+        }
+    for_environment = SubjectRelation('ProjectEnvironment', cardinality='1*',
+                                      inlined=True, composite='object')
+    for_testconfig = SubjectRelation('TestConfig', cardinality='1*',
+                                     inlined=True, composite='object')
+    on_environment = SubjectRelation('ProjectEnvironment', cardinality='1*',
+                                     inlined=True, composite='object')
+
+
 class use_environment(RelationDefinition):
     __permissions__ = {
-        'read':   ('managers', 'users', 'guests', 'apycot'),
+        'read':   ('managers', 'users', 'guests', 'narval'),
         'add':    CONF_WRITE_GROUPS,
         'delete': CONF_WRITE_GROUPS,
         }
-    inlined = True
     subject = 'TestConfig'
     object = 'ProjectEnvironment'
-    cardinality = '1*'
+    cardinality = '**'
     composite = 'object'
     description=_('project environment in which this test config should be launched')
     constraints = [RQLUniqueConstraint('S name N, Y use_environment O, Y name N', 'Y')]
 
 
+class use_recipe(RelationDefinition):
+    __permissions__ = {
+        'read':   ('managers', 'users', 'guests', 'narval'),
+        'add':    CONF_WRITE_GROUPS,
+        'delete': CONF_WRITE_GROUPS,
+        }
+    subject = 'TestConfig'
+    object = 'Recipe'
+    cardinality = '?*'
+
+
 class local_repository(RelationDefinition):
     __permissions__ = {
-        'read':   ('managers', 'users', 'guests', 'apycot'),
+        'read':   ('managers', 'users', 'guests', 'narval'),
         'add':    CONF_WRITE_GROUPS,
         'delete': CONF_WRITE_GROUPS,
         }
     subject = 'ProjectEnvironment'
     object = 'Repository'
     cardinality = '?*'
-    description = _('link to a vcsfile repository, may be used to replace '
-                    'vcs_repository_type / vcs_repository to have deeper '
-                    'integration.')
+    description = _('vcsfile repository holding the source code')
 
 
-class needs_checkout(RelationDefinition):
-    __permissions__ = {
-        'read':   ('managers', 'users', 'guests', 'apycot'),
-        'add':    CONF_WRITE_GROUPS,
-        'delete': CONF_WRITE_GROUPS,
-        }
-    subject = ('ProjectEnvironment', 'TestConfig')
-    object = 'ProjectEnvironment'
-    description = _('project\'s environments that should be installed from '
-                    'their repository to execute test with this configuration')
-    #constraints=[RQLConstraint('NOT S identity O')]
+# class needs_checkout(RelationDefinition):
+#     __permissions__ = {
+#         'read':   ('managers', 'users', 'guests', 'narval'),
+#         'add':    CONF_WRITE_GROUPS,
+#         'delete': CONF_WRITE_GROUPS,
+#         }
+#     subject = 'ProjectEnvironment'
+#     object = 'ProjectEnvironment'
+#     description = _('project\'s environments that should be installed from '
+#                     'their repository to execute test for the environment or with this configuration')
+#     #constraints=[RQLConstraint('NOT S identity O')]
 
-
-class use_group(RelationDefinition):
+class pe_refinement_of(RelationDefinition):
     __permissions__ = {
         'read':   ('managers', 'users', 'guests',),
         'add':    CONF_WRITE_GROUPS,
         'delete': CONF_WRITE_GROUPS,
         }
-    subject = ('TestConfig', 'TestConfigGroup')
-    object = 'TestConfigGroup'
+    name = 'refinement_of'
+    cardinality = '?*'
+    subject = 'ProjectEnvironment'
+    object = 'ProjectEnvironment'
     #constraints=[RQLConstraint('NOT S identity O')]
 
+class tc_refinement_of(pe_refinement_of):
+    subject = 'TestConfig'
+    object = 'TestConfig'
+
 
 # execution data entities and relations ########################################
 
 BOT_ENTITY_PERMS = {
-        'read':   ('managers', 'users', 'guests', 'apycot'),
-        'add':    ('apycot',),
-        'update': ('apycot',),
+        'read':   ('managers', 'users', 'guests', 'narval'),
+        'add':    ('narval',),
+        'update': ('narval',),
         'delete': ('managers',),
         }
 BOT_RELATION_PERMS = {
-        'read':   ('managers', 'users', 'guests', 'apycot'),
-        'add':    ('apycot',),
+        'read':   ('managers', 'users', 'guests', 'narval'),
+        'add':    ('narval',),
         'delete': ('managers',),
         }
 
-class TestExecution(EntityType):
-    __permissions__ = BOT_ENTITY_PERMS
+class TestExecution(Plan):
+    __specializes_schema__ = True
+    __permissions__ = {
+        'read':   ('managers', 'users', 'guests', 'narval'),
+        'add':    CONF_WRITE_GROUPS,
+        'update': ('narval',),
+        'delete': CONF_WRITE_GROUPS,
+        }
+    # XXX overall_checks_status
     status = String(required=True, internationalizable=True, indexed=True,
-                    default=u'set up',
-                    vocabulary=(_('set up'), _('running tests'),
+                    default=u'waiting execution',
+                    vocabulary=(_('waiting execution'), _('running'),
+                                _('set up'), _('running tests'),
                                 _('success'), _('partial'),
                                 _('failure'), _('error'), _('nodata'),
                                 _('missing'), _('skipped'),
                                 _('killed'))
                     )
-    starttime = Datetime(required=True)
-    endtime   = Datetime()
-    branch = String(indexed=True)
+    branch = String(indexed=True, __permissions__=IMMUTABLE_ATTR_PERMS, required=True)
     log = String()
 
 
@@ -288,7 +306,11 @@
 
 
 class using_config(RelationDefinition):
-    __permissions__ = BOT_RELATION_PERMS
+    __permissions__ = {
+        'read':   ('managers', 'users', 'guests', 'narval'),
+        'add':    CONF_WRITE_GROUPS,
+        'delete': CONF_WRITE_GROUPS,
+        }
     inlined = True
     subject = 'TestExecution'
     object = 'TestConfig'
@@ -296,6 +318,19 @@
     composite = 'object'
 
 
+class using_environment(RelationDefinition):
+    __permissions__ = {
+        'read':   ('managers', 'users', 'guests', 'narval'),
+        'add':    CONF_WRITE_GROUPS,
+        'delete': CONF_WRITE_GROUPS,
+        }
+    inlined = True
+    subject = 'TestExecution'
+    object = 'ProjectEnvironment'
+    cardinality = '1*'
+    composite = 'object'
+
+
 class during_execution(RelationDefinition):
     __permissions__ = BOT_RELATION_PERMS
     inlined = True
--- a/schema/_regproc.postgres.sql	Wed Jul 28 12:10:03 2010 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-/* -*- sql -*- 
-
-   postgres specific registered procedures, 
-   require the plpythonu language installed 
-
-*/
-
-CREATE FUNCTION severity_sort_value(text) RETURNS int
-    AS 'return {"DEBUG": 0, "INFO": 10, "WARNING": 20, "ERROR": 30, "FATAL": 40}[args[0]]'
-    LANGUAGE plpythonu;
--- a/setup.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/setup.py	Fri Sep 10 14:14:42 2010 +0200
@@ -145,6 +145,11 @@
     if USE_SETUPTOOLS and install_requires:
         kwargs['install_requires'] = install_requires
         kwargs['dependency_links'] = dependency_links
+    kwargs['packages'] = 'apycotlib', 'apycotlib.preprocessors', 'apycotlib.checkers'
+    kwargs['package_dir'] = {'apycotlib' : '_apycotlib',
+                             'apycotlib.preprocessors': '_apycotlib/preprocessors',
+                             'apycotlib.checkers': '_apycotlib/checkers',
+                             }
     return setup(name = distname,
                  version = version,
                  license = license,
--- a/site_cubicweb.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/site_cubicweb.py	Fri Sep 10 14:14:42 2010 +0200
@@ -1,66 +1,12 @@
-# register severity sorting registered procedure
-from rql.utils import register_function, FunctionDescr
-
-
-class severity_sort_value(FunctionDescr):
-    supported_backends = ('postgres', 'sqlite',)
-    rtype = 'Int'
-
-try:
-    register_function(severity_sort_value)
-except AssertionError:
-    pass
-
-
+# XXX only for all-in-one or repository config
 options = (
-    ('bot-pyro-id',
-     {'type' : 'string',
-      'default' : ':apycot.apycotbot',
-      'help': ('Identifier of the apycot bot in the pyro name-server.'),
-      'group': 'apycot', 'level': 1,
-      }),
-    ('bot-pyro-ns',
-     {'type' : 'string',
-      'default' : None,
-      'help': ('Pyro name server\'s host where the bot is registered. If not '
-               'set, will be detected by a broadcast query. You can also '
-               'specify a port using <host>:<port> notation.'),
+    ('test-master',
+     {'type' : 'yn',
+      'default' : True,
+      'help': ('Is the repository responsible to automatically start test? '
+               'You should say yes unless you use a multiple repositories '
+               'setup, in which case you should say yes on one repository, '
+               'no on others'),
       'group': 'apycot', 'level': 1,
       }),
     )
-
-try:
-    from cubicweb.server import SQL_CONNECT_HOOKS
-except ImportError: # no server installation
-    pass
-else:
-
-    def init_sqlite_connexion(cnx):
-        def severity_sort_value(text):
-            return {"DEBUG": 0, "INFO": 10, "WARNING": 20,
-                    "ERROR": 30, "FATAL": 40}[text]
-        cnx.create_function("SEVERITY_SORT_VALUE", 1, severity_sort_value)
-
-    sqlite_hooks = SQL_CONNECT_HOOKS.setdefault('sqlite', [])
-    sqlite_hooks.append(init_sqlite_connexion)
-
-
-    options += (
-        ('test-master',
-         {'type' : 'yn',
-          'default' : True,
-          'help': ('Is the repository responsible to automatically start test? '
-                   'You should say yes unless you use a multiple repositories '
-                   'setup, in which case you should say yes on one repository, '
-                   'no on others'),
-          'group': 'apycot', 'level': 1,
-          }),
-        ('test-exec-cleanup-delay',
-         {'type' : 'time',
-          'default' : '60d',
-          'help': ('Interval of time after which test execution can be '
-                   'deleted. Default to 60 days. Set it to 0 if you don\'t '
-                   'want automatic deletion.'),
-          'group': 'apycot', 'level': 1,
-          }),
-        )
Binary file test/data/badpkg2_repo.tar.gz has changed
Binary file test/data/badpkg2_svn.tar.gz has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/badrest.txt	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,3 @@
+* toto
+ * tutu
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/badrest_2.txt	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,14 @@
+
+DUMMY README
+============
+
+section1
+--------
+
+c'est vraiment tres interessant, en l'état  de la valeur de l'€...
+
+
+Titre avec état
+_______________
+
+Rien à dire.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/badsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/badsyntax/badsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/extentionfilter/badsyntax.fancyextention	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/extentionfilter/goodsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+AAA = 'yo'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/full_path_filtering/dodo/bad/goodsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+../../../syntax_dir/badsyntax/goodsyntax.py
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/full_path_filtering/dodo/bad/wrongsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+../../../syntax_dir/badsyntax/wrongsyntax.py
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/full_path_filtering/dodo/good/righsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+../../../syntax_dir/goodsyntax/righsyntax.py
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/full_path_filtering/tata/good/righsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+../../../syntax_dir/goodsyntax/righsyntax.py
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/goodpkg2.4/README	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,8 @@
+DUMMY README
+============
+
+section1
+--------
+
+c'est vraiment tres interessant
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/goodpkg2.4/__pkginfo__.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,41 @@
+# goodpkg2.4
+# Copyright (c) 2004-2008 LOGILAB S.A. (Paris, FRANCE).
+# http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; either version 2 of the License, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+""" agood package compatible with python 2.4 and above"""
+
+distname = 'goodpkg2.4'
+modname = 'goodpkg2.4'
+
+numversion = (0, 1, 0)
+version = '.'.join([str(num) for num in numversion])
+
+license = 'GPL'
+copyright = '''Copyright (c) 2004-2008 LOGILAB S.A. (Paris, FRANCE).
+http://www.logilab.fr/ -- mailto:contact@logilab.fr'''
+
+author = "David Pierre-Yves"
+author_email = "pierre-yves.david@logilab.fr"
+
+description = "a good package compatible with python2.4 and above"
+long_desc = "a good package compatible with python2.4 and above"
+
+
+from os.path import join
+
+web='http://www.goodpackage.org'
+
+pyversions = ['2.5','2.4']
+
Binary file test/data/goodpkg2.4/tests/.coverage has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/goodpkg2.4/tests/unittest_dummy.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,19 @@
+import unittest
+
+class DummyTest(unittest.TestCase):
+    def runTest(self):
+        values = set((1, 4, 1, 2, 5, 3, 7, 3, 12))
+        for toto in ( val * 4  for val in values if val % 2):
+            self.assertEqual(toto,toto)
+
+def Run(runner=None):
+    testsuite = unittest.TestSuite()
+    testsuite.addTest(DummyTest())
+
+    if runner is None:
+        runner = unittest.TextTestRunner()
+    return runner.run(testsuite)
+
+if __name__ == '__main__':
+    Run()
+
Binary file test/data/goodpkg_repo.tar.gz has changed
Binary file test/data/goodpkg_svn.tar.gz has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/goodrest.txt	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,8 @@
+DUMMY README
+============
+
+section1
+--------
+
+c'est vraiment tres interessant
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/goodrest_2.txt	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,10 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+DUMMY README
+============
+
+section1
+--------
+
+c'est vraiment tres interessant
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/goodrest_3.txt	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,15 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+DUMMY README
+============
+
+section1
+--------
+
+c'est vraiment tres interessant, en l'état  de la valeur de l'€...
+
+
+Titre avec état
+_______________
+
+Rien à dire.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/goodsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+AAA = 'yo'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/goodsyntax/goodsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+AAA = 'yo'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/invalid.xml	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="iso-8859-1"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+     "http://www.oasis-open.org/docbook/xml/4.0/docbookx.dtd" >
+<book>
+  <hehe></hehe>
+</book>
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/jscript/bad/bad.js	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,3 @@
+function toto() {
+     return toto;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/jscript/bad/bad_syntax.js	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,2 @@
+function toto() {
+    return 4;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/jscript/bad/info.js	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+var A = 4.;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/jscript/correct/correct.js	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,3 @@
+function toto() {
+    return 4;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/malformed.xml	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,3 @@
+<toto>
+ <unclosed>
+</toto>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/mixedsyntax/badsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/mixedsyntax/goodsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+AAA = 'yo'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/README	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+test files for py.test chercker
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/badpkg/tests/test_dummy.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,6 @@
+import py.test
+
+def test_dummy():
+    assert 1 != 1
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/badpkg/tests/test_dummy_2.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,8 @@
+import py.test
+
+def test_dummy():
+    assert 1 == 1
+
+def test_dummy2():
+    assert 1 != 1
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/badpkg/tests/test_dummy_error.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,8 @@
+import py.test
+
+def test_dummy():
+    assert 1 == 1
+    import trucfoireux
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/badpkg/tests/test_dummy_error_2.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,7 @@
+import py.test
+
+def test_dummy():
+    assert 1 == 1
+    if toto
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/goodpkg/tests/test_dummy.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,6 @@
+import py.test
+
+def test_dummy():
+    assert 1 == 1
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/goodpkg/tests/test_dummy_2.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,6 @@
+import py.test
+
+def test_dummy():
+    assert 1 == 1
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/goodpkg/tests_2/test_dummy.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,6 @@
+import py.test
+
+def test_dummy():
+    assert 1 == 1
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/py_test/goodpkg/tests_2/test_dummy_2.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,6 @@
+import py.test
+
+def test_dummy():
+    assert 1 == 1
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/pylint_bad.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+AAA = 'yo'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/pylint_ok.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,3 @@
+"""a module satisfying pylint
+"""
+__revision__ = 1
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/pylintrc	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,12 @@
+# lint Python modules using external checkers.
+# 
+# This is the main checker controlling the other ones and the reports
+# generation. It is itself both a raw checker and an astng checker in order
+# to:
+# * handle message activation / deactivation at the module level
+# * handle some basic but necessary stats'data (number of classes, methods...)
+# 
+[MESSAGES CONTROL]
+
+# Disable the message(s) with the given id(s).
+disable=C0111,
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/repository.conf	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,193 @@
+[APYCOT]
+
+# Identifier of the apycot bot in the pyro name-server.
+bot-pyro-id=:apycot.apycotbot
+
+# Pyro name server's host where the bot is registered. If not set, will be
+# detected by a broadcast query. You can also specify a port using
+# <host>:<port> notation.
+#bot-pyro-ns=
+
+
+[WEB]
+
+# authentication mode (cookie / http)
+auth-mode=cookie
+
+# realm to use on HTTP authentication mode
+realm=cubicweb
+
+# duration in seconds for HTTP sessions. 0 mean no expiration. Should be
+# greater than RQL server's session-time.
+http-session-time=0
+
+# duration in seconds for which unused connections should be closed, to limit
+# memory consumption. This is different from http-session-time since in some
+# cases you may have an unexpired http session (e.g. valid session cookie)
+# which will trigger transparent creation of a new session. In other cases,
+# sessions may never expire and cause memory leak. Should be smaller than
+# http-session-time, unless it's 0. Default to 12 h.
+cleanup-session-time=43200
+
+# Same as cleanup-session-time but specific to anonymous sessions. Default to 2
+# min.
+cleanup-anonymous-session-time=120
+
+# force text/html content type for your html pages instead of cubicweb
+# user-agent baseddeduction of an appropriate content type
+force-html-content-type=no
+
+# regular expression matching URLs that may be embeded. leave it blank if you
+# don't want the embedding feature, or set it to ".*" if you want to allow
+# everything
+#embed-allowed=
+
+# Mail used as recipient to report bug in this instance, if you want this
+# feature on
+#submit-mail=
+
+# use Accept-Language http header to try to set user interface's language
+# according to browser defined preferences
+language-negociation=yes
+
+# print the traceback on the error page when an error occured
+print-traceback=yes
+
+
+[VCSFILE]
+
+# interval between checking of new revisions in repositories (default to 5
+# minutes).
+check-revision-interval=300
+
+
+[PYRO]
+
+# Pyro server host, if not detectable correctly through gethostname(). It may
+# contains port information using <host>:<port> notation, and if not set, it
+# will be choosen randomly
+#pyro-host=
+
+# identifier of the CubicWeb instance in the Pyro name server
+pyro-instance-id=data
+
+# Pyro name server's host. If not set, will be detected by a broadcast query.
+# It may contains port information using <host>:<port> notation.
+pyro-ns-host=
+
+# Pyro name server's group where the repository will be registered.
+pyro-ns-group=cubicweb
+
+
+[APPOBJECTS]
+
+# comma separated list of identifiers of application objects (<registry>.<oid>)
+# to disable
+disable-appobjects=
+
+
+[MAIN]
+
+# size of the connections pools. Each source supporting multiple connections
+# will have this number of opened connections.
+connections-pool-size=4
+
+# size of the parsed rql cache size.
+rql-cache-size=300
+
+# When full text indexation of entity has a too important cost to be done when
+# entity are added/modified by users, activate this option and setup a job
+# using cubicweb-ctl db-rebuild-fti on your system (using cron for instance).
+delay-full-text-indexation=no
+
+# host name if not correctly detectable through gethostname
+#host=
+
+# http server port number (default to 8080)
+#port=
+
+# repository's pid file
+pid-file=/tmp/data-repository.pid
+
+# if this option is set, use the specified user to start the repository rather
+# than the user running the command
+#uid=
+
+# session expiration time, default to 30 minutes
+session-time=1800
+
+# profile code and use the specified file to store stats if this option is set
+#profile=
+
+# run a pyro server
+pyro-server=yes
+
+# server's log level
+log-threshold=ERROR
+
+# web server root url
+base-url=http://testing.fr/cubicweb/
+
+# allow users to login with their primary email if set
+allow-email-login=no
+
+# if set, base-url subdomain is replaced by the request's host, to help
+# managing sites with several subdomains in a single cubicweb instance
+use-request-subdomain=no
+
+# file where output logs should be written
+log-file=/var/log/cubicweb/data-repository.log
+
+# login of the CubicWeb user account to use for anonymous user (if you want to
+# allow anonymous)
+anonymous-user=anon
+
+# password of the CubicWeb user account matching login
+anonymous-password=anon
+
+# web instance query log file
+#query-log-file=
+
+# web server root url on https. By specifying this option your site can be
+# available as an http and https site. Authenticated users will in this case be
+# authenticated and once done navigate through the https site. IMPORTANTE NOTE:
+# to do this work, you should have your apache redirection include "https" as
+# base url path so cubicweb can differentiate between http vs https access. For
+# instance: RewriteRule ^/demo/(.*) http://127.0.0.1:8080/https/$1 [L,P] where
+# the cubicweb web server is listening on port 8080.
+#https-url=
+
+
+[EMAIL]
+
+# when a notification should be sent with no specific rules to find recipients,
+# recipients will be found according to this mode. Available modes are
+# "default-dest-addrs" (emails specified in the configuration variable with the
+# same name), "users" (every users which has activated account with an email
+# set), "none" (no notification).
+default-recipients-mode=default-dest-addrs
+
+# comma separated list of email addresses that will be used as default
+# recipient when an email is sent and the notification has no specific
+# recipient rules.
+default-dest-addrs=syt@logilab.fr
+
+# comma separated list of email addresses that will be notified of every
+# changes.
+supervising-addrs=
+
+# don't display actual email addresses but mangle them if this option is set to
+# yes
+mangle-emails=no
+
+# hostname of the SMTP mail server
+smtp-host=mail
+
+# listening port of the SMTP mail server
+smtp-port=25
+
+# name used as HELO name for outgoing emails from the repository.
+sender-name=cubicweb-test
+
+# email address used as HELO address for outgoing emails from the repository
+sender-addr=syt@logilab.fr
Binary file test/data/svn_test_repo.tar has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/syntax_dir/badsyntax/goodsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+AAA = 'yo'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/syntax_dir/badsyntax/wrongsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/syntax_dir/goodsyntax/righsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+AAA = 'yo'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/syntax_dir/rootbadsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+return
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/syntax_dir/rootgoodsyntax.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+AAA = 'yo'
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/test_dirs_test_pkg/README	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,8 @@
+DUMMY README
+============
+
+section1
+--------
+
+c'est vraiment tres interessant
+
Binary file test/data/test_dirs_test_pkg/dir_for_tetsing/.coverage has changed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/test_dirs_test_pkg/dir_for_tetsing/unittest_dummy.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,17 @@
+import unittest
+
+class DummyTest(unittest.TestCase):
+    def runTest(self):
+        self.failUnlessEqual(1, 1)
+
+def Run(runner=None):
+    testsuite = unittest.TestSuite()
+    testsuite.addTest(DummyTest())
+
+    if runner is None:
+        runner = unittest.TextTestRunner()
+    return runner.run(testsuite)
+
+if __name__ == '__main__':
+    Run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/testcase_pkg/tests/unittest_empty.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,1 @@
+import unittest
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/testcase_pkg/tests/unittest_errors.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,17 @@
+import unittest
+
+class DummyTest(unittest.TestCase):
+    def runTest(self):
+        str.erzglfdjslgjldfjgl
+
+def Run(runner=None):
+    testsuite = unittest.TestSuite()
+    testsuite.addTest(DummyTest())
+
+    if runner is None:
+        runner = unittest.TextTestRunner()
+    return runner.run(testsuite)
+
+if __name__ == '__main__':
+    Run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/testcase_pkg/tests/unittest_failure.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,17 @@
+import unittest
+
+class DummyTest(unittest.TestCase):
+    def runTest(self):
+        self.failUnlessEqual(1, 0)
+
+def Run(runner=None):
+    testsuite = unittest.TestSuite()
+    testsuite.addTest(DummyTest())
+
+    if runner is None:
+        runner = unittest.TextTestRunner()
+    return runner.run(testsuite)
+
+if __name__ == '__main__':
+    Run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/testcase_pkg/tests/unittest_mixed.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,65 @@
+from logilab.common.testlib import TestCase, unittest_main
+
+class DummyTest(TestCase):
+    def test_ok(self):
+        self.failUnlessEqual(1, 1)
+    def test_ok2(self):
+        pass
+    def test_skip(self):
+        self.skip("la mer qu'on voit dancer")
+    def test_skip2(self):
+        self.skip("Hast du etwas Zeit fur mich?")
+
+    def test_fail_0(self):
+        self.assertEquals(1337, 0)
+
+    def test_fail_1(self):
+        self.assertEquals(1337, 1)
+
+    def test_fail_2(self):
+        self.assertEquals(1337, 2)
+
+    def test_fail_3(self):
+        self.assertEquals(1337, 3)
+
+    def test_fail_4(self):
+        self.assertEquals(1337, 4)
+
+    def test_fail_5(self):
+        self.assertEquals(1337, 5)
+
+    def test_fail_6(self):
+        self.assertEquals(1337, 6)
+
+    def test_fail_7(self):
+        self.assertEquals(1337, 7)
+
+    def test_fail_8(self):
+        self.assertEquals(1337, 8)
+
+    def test_fail_9(self):
+        self.assertEquals(1337, 9)
+
+    def test_fail_10(self):
+        self.assertEquals(1337, 10)
+
+    def test_fail_11(self):
+        self.assertEquals(1337, 11)
+
+    def test_fail_12(self):
+        self.assertEquals(1337, 12)
+
+    def test_fail_13(self):
+        self.assertEquals(1337, 13)
+
+    def test_fail_14(self):
+        self.assertEquals(1337, 14)
+
+
+    def test_errors(self):
+        int.dsqhgdlsjgjl
+
+
+if __name__ == '__main__':
+    unittest_main()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/testcase_pkg/tests/unittest_mixed_std.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,61 @@
+from unittest import TestCase, main
+
+class DummyTest(TestCase):
+    def test_ok(self):
+        self.failUnlessEqual(1, 1)
+    def test_ok2(self):
+        pass
+
+    def test_fail_0(self):
+        self.assertEquals(1337, 0)
+
+    def test_fail_1(self):
+        self.assertEquals(1337, 1)
+
+    def test_fail_2(self):
+        self.assertEquals(1337, 2)
+
+    def test_fail_3(self):
+        self.assertEquals(1337, 3)
+
+    def test_fail_4(self):
+        self.assertEquals(1337, 4)
+
+    def test_fail_5(self):
+        self.assertEquals(1337, 5)
+
+    def test_fail_6(self):
+        self.assertEquals(1337, 6)
+
+    def test_fail_7(self):
+        self.assertEquals(1337, 7)
+
+    def test_fail_8(self):
+        self.assertEquals(1337, 8)
+
+    def test_fail_9(self):
+        self.assertEquals(1337, 9)
+
+    def test_fail_10(self):
+        self.assertEquals(1337, 10)
+
+    def test_fail_11(self):
+        self.assertEquals(1337, 11)
+
+    def test_fail_12(self):
+        self.assertEquals(1337, 12)
+
+    def test_fail_13(self):
+        self.assertEquals(1337, 13)
+
+    def test_fail_14(self):
+        self.assertEquals(1337, 14)
+
+
+    def test_errors(self):
+        int.dsqhgdlsjgjl
+
+
+if __name__ == '__main__':
+    main()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/testcase_pkg/tests/unittest_no_main.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,13 @@
+import unittest
+
+class DummyTest(unittest.TestCase):
+    def runTest(self):
+        pass
+
+def Run(runner=None):
+    testsuite = unittest.TestSuite()
+    testsuite.addTest(DummyTest())
+
+    if runner is None:
+        runner = unittest.TextTestRunner()
+    return runner.run(testsuite)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/testcase_pkg/tests/unittest_skip.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,10 @@
+from logilab.common.testlib import TestCase, unittest_main
+
+class DummyTest(TestCase):
+    def runTest(self):
+        self.skip("I'm singing in the rain")
+
+
+if __name__ == '__main__':
+    unittest_main()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/data/testcase_pkg/tests/unittest_success.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,17 @@
+import unittest
+
+class DummyTest(unittest.TestCase):
+    def runTest(self):
+        pass
+
+def Run(runner=None):
+    testsuite = unittest.TestSuite()
+    testsuite.addTest(DummyTest())
+
+    if runner is None:
+        runner = unittest.TextTestRunner()
+    return runner.run(testsuite)
+
+if __name__ == '__main__':
+    Run()
+
--- a/test/test_apycot.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/test/test_apycot.py	Fri Sep 10 14:14:42 2010 +0200
@@ -7,20 +7,19 @@
 
 
 class MyValueGenerator(ValueGenerator):
-    def generate_ProjectEnvironment_check_preprocessors(self, entity, index):
-        return u'install=python_setup'
     def generate_Any_check_config(self, entity, index):
-        return u'pylint_threshold=70'
+        return u'pylint_threshold=70\ninstall=python_setup'
     def generate_Any_check_environment(self, entity, index):
-        return u'USE_SETUPTOOLS=0'
+        return u'NO_SETUPTOOLS=1'
 
 
 class AutomaticWebTest(AutomaticWebTest):
     no_auto_populate = set(('Repository', 'Revision', 'VersionedFile',
-                            'VersionContent', 'DeletedVersionContent',))
+                            'VersionContent', 'DeletedVersionContent',
+                            'TestExecution', 'Plan', 'CheckResult', 'CheckResultInfo'))
     ignored_relations = set(('at_revision', 'parent_revision',
                              'from_repository', 'from_revision', 'content_for',
-                             'nosy_list'))
+                             'nosy_list', 'execution_of'))
 
     def setUp(self):
         super(AutomaticWebTest, self).setUp()
@@ -33,7 +32,7 @@
                 rdef.set_action_permissions('add', ('managers',))
 
     def to_test_etypes(self):
-        return set(('ProjectEnvironment', 'TestConfig', 'TestConfigGroup',
+        return set(('ProjectEnvironment', 'TestConfig',
                     'TestExecution', 'CheckResult', 'CheckResultInfo'))
 
     def list_startup_views(self):
--- a/test/unittest_apycot.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/test/unittest_apycot.py	Fri Sep 10 14:14:42 2010 +0200
@@ -1,42 +1,55 @@
+from __future__ import with_statement
+
 from utils import ApycotBaseTC
 
-from apycotbot.writer import DataWriter
-from apycotbot.utils import ConnectionHandler
+from apycotlib.writer import TestDataWriter
+from narvalbot import ConnectionHandler
 
-from cubes.apycot.logformat import log_to_html
+from cubes.narval.logformat import log_to_html
 
 
 CW_NAMESPACE_DIV = '<div xmlns:cubicweb="http://www.logilab.org/2008/cubicweb">%s</div>'
 
+class MockChecker(object):
+    def __init__(self, id, options):
+        self.id = id
+        self.options = options
+        self.options_def = dict( (k, {}) for k in options )
+
 class ApycotTC(ApycotBaseTC):
 
     def setUp(self):
         super(ApycotBaseTC, self).setUp()
-        cnx = self.login('apycotbot', password='apycot')
-        cnxh = ConnectionHandler(self.config.appid, cnxinfo={})
-        cnxh.cnx = cnx
-        cnxh._cu = cnx.cursor()
-        cnxh.cw = cnxh._cu.req
-        writer = DataWriter(cnxh, self.lgc.eid)
-        writer.start_test(u'default')
-        writer.start_check(u'pylint', {})
-        writer.raw('pylint_version', '0.18.1', type=u'version')
-        writer.debug('hip', path='/tmp/something', line=12)
-        writer.info('hop', path='/tmp/something')
-        writer.warning('''momo\n\n<br/>''')
-        writer.end_check(u'success')
-        writer.start_check(u'lintian', {'option': 'value'})
-        writer.raw('lintian_version', '1.0')
-        writer.error('bouh')
-        writer.fatal('di&d')
-        writer.end_check(u'failure')
-        writer.end_test(u'failure')
+        te = self.lgc.start(self.lgce)
+        self.commit()
+        with self.login('narval', password='narval0') as cu:
+            cnxh = ConnectionHandler(self.config.appid, cnxinfo={})
+            cnxh.cnx = cu.connection
+            cnxh._cu = cu
+            cnxh.cw = cu.req
+            writer = TestDataWriter(cnxh, te.eid)
+            writer.start()
+            cwriter = writer.make_check_writer()
+            cwriter.start(MockChecker(u'pylint', {}))
+            cwriter.raw('pylint_version', '0.18.1', type=u'version')
+            cwriter.debug('hip', path='/tmp/something', line=12)
+            cwriter.info('hop', path='/tmp/something')
+            cwriter.warning('''momo\n\n<br/>''')
+            cwriter.end(u'success')
+            cwriter = writer.make_check_writer()
+            cwriter.start(MockChecker(u'lintian', {'option': 'value'}))
+            cwriter.raw('lintian_version', '1.0')
+            cwriter.error('bouh')
+            cwriter.fatal('di&d')
+            cwriter.end(u'failure')
+            writer.end(u'failure')
         self.checks = self.execute('Any X, N ORDERBY N WHERE X is CheckResult, X name N')
 
     def test_writer_log_content(self):
         checks = self.checks
         self.assertEquals(len(checks), 2)
         self.assertTextEquals(checks.get_entity(0, 0).log, '''\
+20\t\t\toption=value<br/>
 40\t\t\tbouh<br/>
 50\t\t\tdi&amp;d<br/>''')
         self.assertTextEquals(checks.get_entity(1, 0).log, '''\
@@ -48,30 +61,32 @@
 
     def test_log_formatting_first_check(self):
         stream = []
-        log_to_html(self.request(), self.checks.get_entity(0, 0).log, stream.append)
+        log_to_html(self.request(), '', self.checks.get_entity(0, 0).log, stream.append)
         log_html = '\n'.join(stream)
         self.assertXMLStringWellFormed(CW_NAMESPACE_DIV % log_html)
         for pattern, count in (
-                ('<table class="listing apylog">', 1),
+                ('<table class="listing" id="">', 1),
                 ('<tr class="logError"', 1),
                 ('<tr class="logFatal"', 1),
-                ('<td class="logSeverity"', 2),
-                ('<td class="logPath"',  2),
-                ('<td class="logMsg"',   2),
-                ('<pre class="rawtext"', 2),
+                ('<tr class="logInfo"', 1),
+                ('<td class="logSeverity"', 3),
+                ('<td class="logPath"',  3),
+                ('<td class="logMsg"',   3),
+                ('<pre class="rawtext"', 3),
                 ('bouh', 1),
                 ('di&amp;d',1),
+                ('option=value', 1),
             ):
             self.assertIn(pattern, log_html)
             self.assertEquals(log_html.count(pattern), count)
 
     def test_log_formatting_second_check(self):
         stream = []
-        log_to_html(self.request(), self.checks.get_entity(1, 0).log, stream.append)
+        log_to_html(self.request(), '', self.checks.get_entity(1, 0).log, stream.append)
         log_html = '\n'.join(stream)
         self.assertXMLStringWellFormed(CW_NAMESPACE_DIV % log_html)
         for pattern, count in (
-                ('<table class="listing apylog">', 1),
+                ('<table class="listing" id="">', 1),
                 ('<tr class="logDebug"', 1),
                 ('<tr class="logInfo"', 1),
                 ('<tr class="logWarning"', 1),
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_checkers.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,238 @@
+"""
+unit tests for checkers
+"""
+
+import unittest
+import sys
+import os
+import tarfile
+import shutil
+from os.path import join
+from unittest import TestSuite
+
+from logilab.common.testlib import unittest_main, TestCase
+
+from utils import * # import this first
+
+zope_path = os.environ.get('SOFTWARE_HOME', '/usr/lib/zope/lib/python')
+sys.path.insert(1, zope_path)
+
+from apycotlib import SUCCESS, FAILURE, PARTIAL, NODATA, ERROR
+from apycotlib.checkers.python import *
+#from apycotlib.checkers.chks_xml import *
+#from apycotlib.checkers.chks_rest import *
+#from apycotlib.checkers.chks_pkg import *
+#from apycotlib.checkers import chks_pt
+
+WRITER = MockCheckWriter()
+
+
+
+# manage temporary repo
+def _create_repo():
+    for repo in ('badpkg2_repo', 'goodpkg_repo'):
+        path = input_path(repo)
+        if exists(path):
+            shutil.rmtree(path)
+        tarfile.open(input_path('%s.tar.gz' % repo), 'r|gz').extractall(input_path(''))
+
+def _rm_repo():
+    if input_path is None: # import error
+        return
+    for repo in ('badpkg2', 'goodpkg'):
+        path = input_path(repo)
+        if exists(path):
+            shutil.rmtree(path)
+_create_repo()
+import atexit
+atexit.register(_rm_repo)
+
+
+
+class FileCheckerTest(TestCase):
+    def __init__(self, checker, files, method_name):
+        TestCase.__init__(self, method_name)
+        self.checker = checker
+        self.files = [input_path(file) for file in files]
+        self.set_description('checker: <%s>, files: %s' % (checker.id, files))
+
+
+    def check_file(self,file):
+        return self.checker.check_file(file)
+
+    def check_dir(self,file):
+        return self.checker.check(MockTest(MockRepository(path=file)))
+
+    def chks_test_(self, expected, func):
+        self.checker.writer.start(self.checker)
+        self.checker.check_options()
+        for file in self.files:
+            self.checker.writer.clear_writer()
+            status = func(file)
+            msg = []
+            msg.append('%s on %s status: %s expecting %s' % (self.checker.id, file, status, expected))
+            if self.checker.options:
+                msg.extend('    '+('='.join(str(i) for i in pair)) for pair in self.checker.options.iteritems())
+            msg.append('last messages:')
+            msg.extend(WRITER._logs[-5:])
+            msg = '\n'.join(msg)
+
+            self.failUnlessEqual(status, expected, msg)#+'\n-----\n'+WRITER.stderr.getvalue())
+
+    def chks_test_file_success(self):
+        self.chks_test_(SUCCESS, self.check_file)
+
+    def chks_test_file_failure(self):
+        self.chks_test_(FAILURE, self.check_file)
+
+    def chks_test_file_error(self):
+        self.chks_test_(ERROR, self.check_file)
+
+    def chks_test_dir_success(self):
+        self.chks_test_(SUCCESS, self.check_dir)
+
+    def chks_test_dir_failure(self):
+        self.chks_test_(FAILURE, self.check_dir)
+
+    def chks_test_dir_partial(self):
+        self.chks_test_(PARTIAL, self.check_dir)
+
+    def chks_test_dir_nodata(self):
+        self.chks_test_(NODATA, self.check_dir)
+
+    def chks_test_dir_error(self):
+        self.chks_test_(ERROR, self.check_dir)
+
+
+class ModuleCheckerTest(FileCheckerTest):
+
+    def check_pkg(self, file):
+        return self.checker.check(MockTest(MockRepository(path=file)))
+
+    def chks_test_success(self):
+        self.chks_test_(SUCCESS, self.check_pkg)
+
+    def chks_test_error(self):
+        self.chks_test_(ERROR, self.check_pkg)
+
+    def chks_test_failure(self):
+        self.chks_test_(FAILURE, self.check_pkg)
+
+    def chks_test_partial(self):
+        self.chks_test_(PARTIAL, self.check_pkg)
+
+    def chks_test_nodata(self):
+        self.chks_test_(NODATA, self.check_dir)
+
+
+def suite():
+    """return the unitest suite"""
+    testsuite = TestSuite()
+    addTest = testsuite.addTest
+    ##### FileChecker #####
+
+    file_checker = PythonSyntaxChecker(WRITER)
+    addTest(FileCheckerTest(file_checker, ['empty_dir'], 'chks_test_dir_nodata'))
+
+    ##### PythonSyntaxChecker #####
+    python_syntax = PythonSyntaxChecker(WRITER)
+    addTest(FileCheckerTest(python_syntax, ['goodsyntax.py'], 'chks_test_file_success'))
+    addTest(FileCheckerTest(python_syntax, ['badsyntax.py'], 'chks_test_file_failure'))
+    addTest(FileCheckerTest(python_syntax, ['goodsyntax/'], 'chks_test_dir_success'))
+    addTest(FileCheckerTest(python_syntax, ['badsyntax/'], 'chks_test_dir_failure'))
+    addTest(FileCheckerTest(python_syntax, ['mixedsyntax/'], 'chks_test_dir_failure'))
+    addTest(FileCheckerTest(python_syntax, ['extentionfilter/'], 'chks_test_dir_success'))
+    addTest(FileCheckerTest(python_syntax, ['syntax_dir/badsyntax/'], 'chks_test_dir_failure'))
+
+
+    addTest(FileCheckerTest(python_syntax, ['goodsyntax.py'], 'chks_test_file_success'))
+
+    python_syntax = PythonSyntaxChecker(WRITER, {'ignore': 'wrongsyntax.py'})
+    addTest(FileCheckerTest(python_syntax, ['syntax_dir/badsyntax/'], 'chks_test_dir_success'))
+
+    python_syntax = PythonSyntaxChecker(WRITER, {'ignore':'rootbadsyntax.py,badsyntax'})
+    addTest(FileCheckerTest(python_syntax, ['syntax_dir/'], 'chks_test_dir_success'))
+
+    # check filtering of specific subdirectory
+    python_syntax = PythonSyntaxChecker(WRITER, {'ignore':'dodo/bad'})
+    addTest(FileCheckerTest(python_syntax, ['full_path_filtering/'], 'chks_test_dir_success'))
+
+    # check filtering of absolute path
+    python_syntax = PythonSyntaxChecker(WRITER, {'ignore':'full_path_filtering/dodo/bad'})
+    addTest(FileCheckerTest(python_syntax, ['full_path_filtering/'], 'chks_test_dir_success'))
+    python_syntax = PythonSyntaxChecker(WRITER)
+    python_syntax.best_status = 'partial'
+    addTest(FileCheckerTest(python_syntax, ['extentionfilter/'], 'chks_test_dir_partial'))
+
+    ##### PyUnitTestChecker #####
+    python_unit = PyUnitTestChecker(WRITER)
+    addTest(ModuleCheckerTest(python_unit, ['goodpkg'], 'chks_test_success'))
+    addTest(ModuleCheckerTest(python_unit, ['badpkg1'], 'chks_test_nodata'))
+    addTest(ModuleCheckerTest(python_unit, ['badpkg2'], 'chks_test_failure'))
+
+    python_unit = PyUnitTestChecker(WRITER, {'test_dirs':'dir_for_tetsing'}) # typo is intentional#
+    addTest(ModuleCheckerTest(python_unit, ['test_dirs_test_pkg/'], 'chks_test_success'))
+
+    # use sys.executable, success
+    python_unit = PyUnitTestChecker(WRITER, {'use_pkginfo_python_versions': '0'})
+    addTest(ModuleCheckerTest(python_unit, ['goodpkg2.4/'], 'chks_test_success'))
+    # py 2.4 & py 2.5, success
+    python_unit = PyUnitTestChecker(WRITER, {'use_pkginfo_python_versions': '1'})
+    addTest(ModuleCheckerTest(python_unit, ['goodpkg2.4/'], 'chks_test_success'))
+    # use sys.executable, success
+    python_unit = PyUnitTestChecker(WRITER, {'ignored_python_versions':'2.4'})
+    addTest(ModuleCheckerTest(python_unit, ['goodpkg2.4/'], 'chks_test_success'))
+    # unavailable py 2.3, error (ignored_python_versions option ignored when tested_python_versions is set)
+    python_unit = PyUnitTestChecker(WRITER, {'ignored_python_versions':'2.3', 'tested_python_versions':'2.3'})
+    addTest(ModuleCheckerTest(python_unit, ['goodpkg2.4/'], 'chks_test_error'))
+    python_unit = PyUnitTestChecker(WRITER, {'tested_python_versions':'2.3', 'use_pkginfo_python_versions':'0'})
+    addTest(ModuleCheckerTest(python_unit, ['goodpkg2.4/'], 'chks_test_error'))
+
+    ##### PyCoverageChecker #####
+
+    python_chks_test_coverage = PyCoverageChecker(WRITER, {'coverage.threshold': 1,
+                                                           'coverage_data': 'data/goodpkg2.4/tests/.coverage'})
+    addTest(ModuleCheckerTest(python_chks_test_coverage, ['goodpkg'], 'chks_test_success'))
+
+    ##### PyLintChecker #####
+    pylint = PyLintChecker(WRITER, {'pylint.threshold': 7})
+    addTest(ModuleCheckerTest(pylint, ['pylint_ok.py'], 'chks_test_success'))
+    addTest(ModuleCheckerTest(pylint, ['pylint_bad.py'], 'chks_test_failure'))
+
+    pylint_rc = PyLintChecker(WRITER, {'pylint.threshold': 7,
+                                       'pylintrc':input_path("pylintrc"),
+                                       "pylint.show_categories": "F,E,W,C"})
+    addTest(ModuleCheckerTest(pylint_rc, ['pylint_bad.py'], 'chks_test_success'))
+
+    # xml_syntax = XmlFormChecker(WRITER)
+    # addTest(FileCheckerTest(xml_syntax, ['invalid.xml'], 'chks_test_file_success'))
+    # addTest(FileCheckerTest(xml_syntax, ['malformed.xml'], 'chks_test_file_failure'))
+
+    # xml_valid = XmlValidChecker(WRITER, {'catalog': join(INPUTS_DIR,'logilab.cat')})
+    # addTest(FileCheckerTest(xml_valid, ['invalid.xml'], 'chks_test_file_failure'))
+
+    # rest_syntax = ReSTChecker(WRITER)
+    # addTest(FileCheckerTest(rest_syntax, ['goodrest.txt'], 'chks_test_file_success'))
+    # addTest(FileCheckerTest(rest_syntax, ['goodrest_2.txt'], 'chks_test_file_success'))
+    # addTest(FileCheckerTest(rest_syntax, ['goodrest_3.txt'], 'chks_test_file_success'))
+    # addTest(FileCheckerTest(rest_syntax, ['badrest.txt'], 'chks_test_file_failure'))
+    # addTest(FileCheckerTest(rest_syntax, ['badrest_2.txt'], 'chks_test_file_failure'))
+    # if hasattr(chks_pt, 'ZopePageTemplate'):
+    #     pt_syntax = chks_pt.ZPTChecker(WRITER)
+    #     addTest(FileCheckerTest(pt_syntax, ['task_view.pt'], 'chks_test_file_success'))
+    #     addTest(FileCheckerTest(pt_syntax, ['task_view_bad.pt'], 'chks_test_file_failure'))
+
+    # pkg_doc = PackageDocChecker(WRITER)
+    # addTest(ModuleCheckerTest(pkg_doc, ['goodpkg'],'chks_test_success'))
+    # addTest(ModuleCheckerTest(pkg_doc, ['badpkg2'], 'chks_test_failure'))
+
+    # XXX py.test is not maintained anymore
+    # if not os.system('which py.test'):
+    #     py_test = PyDotTestChecker(WRITER)
+    #     addTest(ModuleCheckerTest(py_test, ['py_test/goodpkg'], 'chks_test_success'))
+    #     addTest(ModuleCheckerTest(py_test, ['py_test/badpkg'], 'chks_test_failure'))
+
+    return testsuite
+
+if __name__ == '__main__':
+    unittest_main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_checkers_jslint.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,26 @@
+from logilab.common.testlib import TestSuite, unittest_main
+
+from unittest_checkers import FileCheckerTest, WRITER
+
+try:
+    from apycotlib.checkers.jslint import JsLintChecker, JsLintParser
+except ImportError:
+    print 'install rhino to enable jslint'
+    JsLintChecker = JsLintParser = None
+
+def suite():
+    testsuite = TestSuite()
+    if JsLintChecker is None:
+        return testsuite
+    addTest = testsuite.addTest
+    # JSLint
+    js_lint = JsLintChecker(WRITER)
+    addTest(FileCheckerTest(js_lint, ['jscript/correct'], 'chks_test_dir_success'))
+
+    addTest(FileCheckerTest(js_lint, ['jscript/bad/',],
+                            'chks_test_dir_failure'))
+    return testsuite
+
+
+if __name__ == '__main__':
+    unittest_main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_checkers_pyunit.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,96 @@
+"""
+unit tests for python test checkers
+"""
+
+import os, sys
+from os.path import join
+
+from logilab.common.testlib import unittest_main, TestCase, mock_object
+
+from utils import MockCheckWriter, input_path
+
+from apycotlib import SUCCESS, FAILURE, ERROR, PARTIAL, NODATA
+from apycotlib.checkers import python
+
+def _test_cmd(self, cmd, status, success=0, failures=0, errors=0, skipped=0):
+
+    for name, got, expected in (
+            ('failures', cmd.parser.failures, failures),
+            ('errors', cmd.parser.errors, errors),
+            ('skipped', cmd.parser.skipped, skipped),
+            ('success', cmd.parser.success, success),
+        ):
+        self.assertEquals(got, expected, '%i %s but %i expected'
+                          % (got, name, expected))
+    self.assertIs(cmd.status, status)
+
+class PyUnitTestCheckerTC(TestCase):
+    input_dir = input_path('testcase_pkg/tests/')
+
+    def setUp(self):
+        self.checker = python.PyUnitTestChecker(MockCheckWriter())
+        self.checker._coverage = None
+        self.checker._path = input_path('')
+
+    def input_path(self, path):
+        return join(self.input_dir, path)
+
+    def test_run_test_result_empty(self):
+        cmd = self.checker.run_test(self.input_path('unittest_empty.py'))
+        _test_cmd(self, cmd, NODATA, success=0)
+
+    def test_run_test_result_no_main(self):
+        cmd = self.checker.run_test(self.input_path('unittest_no_main.py'))
+        _test_cmd(self, cmd, NODATA, success=0)
+
+    def test_run_test_result_success(self):
+        cmd = self.checker.run_test(self.input_path('unittest_success.py'))
+        _test_cmd(self, cmd, SUCCESS, success=1)
+
+    def test_run_test_result_failure(self):
+        cmd = self.checker.run_test(self.input_path('unittest_failure.py'))
+        _test_cmd(self, cmd, FAILURE, failures=1)
+
+    def test_run_test_result_error(self):
+        cmd = self.checker.run_test(self.input_path('unittest_errors.py'))
+        _test_cmd(self, cmd, FAILURE, errors=1)
+
+    def test_run_test_result_skipped(self):
+        cmd = self.checker.run_test(self.input_path('unittest_skip.py'))
+        _test_cmd(self, cmd, PARTIAL, skipped=1)
+
+    def test_run_test_result_mixed(self):
+        cmd = self.checker.run_test(self.input_path('unittest_mixed.py'))
+        _test_cmd(self, cmd, FAILURE, 2, 15, 1, 2)
+
+    def test_run_test_result_mixed_std(self):
+        cmd = self.checker.run_test(self.input_path('unittest_mixed_std.py'))
+        _test_cmd(self, cmd, FAILURE, 2, 15, 1, 0)
+
+
+class PyTestCheckerTC(TestCase):
+    input_dir = input_path('testcase_pkg/tests/')
+
+    def setUp(self):
+        self.checker = python.PyTestChecker(MockCheckWriter())
+        self.checker._coverage = False
+        self.cwd = os.getcwd()
+        os.chdir(input_path('testcase_pkg'))
+
+    def tearDown(self):
+        os.chdir(self.cwd)
+
+    def _test_cmd(self, *args):
+
+        cmd_args = ['-c', 'from logilab.common.pytest '
+                  'import run; run()',]
+        cmd_args.extend(args)
+        return self.checker.run_test(cmd_args, sys.executable)
+
+    def test_global(self):
+        cmd = self._test_cmd()
+        #XXX should be NODATA but not handled at the moment
+        _test_cmd(self, cmd, FAILURE, 6, 31, 3, 3)
+
+if __name__ == '__main__':
+    unittest_main()
--- a/test/unittest_entities.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/test/unittest_entities.py	Fri Sep 10 14:14:42 2010 +0200
@@ -2,9 +2,10 @@
 
 from cubicweb import ValidationError
 
+from cubes.apycot.views.reports import all_check_results
+
 from utils import ApycotBaseTC
 
-
 class MockWriter(object):
     """fake apycot.IWriter class, ignore every thing"""
 
@@ -22,64 +23,57 @@
 
     def setup_database(self):
         ApycotBaseTC.setup_database(self)
-        self.add_test_config(u'lgd', checks=None, check_config=None,
-                             env=self.lgce, group=self.pyp)
+        self.lgd = self.add_test_config(u'lgd', check_config=None,
+                                        env=self.lgce, group=self.pyp)
 
     def test_use_group_base(self):
-        lgd = self.execute('TestConfig X WHERE X name "lgd"').get_entity(0, 0)
-        self.assertEquals(lgd.all_checks,
-                          'python_pkg,pkg_doc,python_syntax,python_lint,python_unittest,python_test_coverage'.split(','))
-        self.assertEquals(lgd.apycot_configuration,
-                          {'python_lint_treshold': '7',
+        self.assertEquals(self.lgd.apycot_configuration(self.lgce),
+                          {'install': 'setup_install',
+                           'python_lint_treshold': '7',
                            'python_lint_ignore': 'thirdparty',
                            'python_test_coverage_treshold': '70',
                            'env-option': 'value'})
 
     def test_use_group_override(self):
-        lgc = self.execute('TestConfig X WHERE X name "lgc"').get_entity(0, 0)
-        self.assertEquals(lgc.all_checks,
-                          'python_lint,python_unittest,python_test_coverage'.split(','))
-        self.assertEquals(lgc.apycot_configuration,
-                          {'python_lint_treshold': '8',
+        self.assertEquals(self.lgc.apycot_configuration(self.lgce),
+                          {'install': 'setup_install',
+                           'python_lint_treshold': '8',
                            'python_lint_ignore': 'thirdparty',
                            'python_test_coverage_treshold': '70',
                            'pouet': '5',
                            'env-option': 'value'})
 
-    def test_latest_full_execution(self):
-        self.login('apycotbot', password='apycot')
-        ex1 = self.add_execution('lgc', [('unittest', 'success'), ('coverage', 'success')])
-        ex2 = self.add_execution('lgc', [('unittest', 'failure')])
+    def test_all_check_results(self):
+        ex1 = self.lgc.start(self.lgce, check_duplicate=False)
+        ex2 = self.lgc.start(self.lgce, check_duplicate=False)
         self.commit()
-        self.restore_connection()
-        self.assertEquals(self.lgc.latest_execution().eid, ex2.eid)
-        self.assertEquals(self.lgc.latest_full_execution().eid, ex1.eid)
-
-    def test_all_check_results(self):
-        self.login('apycotbot', password='apycot')
-        ex1 = self.add_execution('lgc', [('unittest', 'success'), ('coverage', 'success')])
+        self.login('narval', password='narval0')
+        self.dumb_execution(ex1, [('unittest', 'success'), ('coverage', 'success')])
+        self.commit()
         covcr = ex1.check_result_by_name('coverage').eid
-        ex2 = self.add_execution('lgc', [('unittest', 'failure')])
+        self.dumb_execution(ex2, [('unittest', 'failure')])
+        self.commit()
         ucr = ex2.check_result_by_name('unittest').eid
         self.commit()
         self.restore_connection()
-        self.assertEquals([cr.eid for cr in self.lgc.all_check_results()],
+        self.assertEquals([cr.eid for cr in all_check_results(self.lgc)],
                           [covcr, ucr])
 
     def test_duplicated_tc_same_env(self):
-        tcgncstrs = self.schema['TestConfigGroup'].rdef('name').constraints
-        self.assertEquals([cstr.type() for cstr in tcgncstrs], ['SizeConstraint', 'UniqueConstraint'])
         tcncstrs = self.schema['TestConfig'].rdef('name').constraints
         self.assertEquals([cstr.type() for cstr in tcncstrs], ['RQLUniqueConstraint', 'SizeConstraint'])
-        self.request().create_entity('TestConfig', name=u'lgd')
-        self.execute('SET X use_environment Y WHERE X name "lgd", Y is ProjectEnvironment')
+        self.request().create_entity('TestConfig', name=u'lgd', use_environment=self.lgce)
         self.assertRaises(ValidationError, self.commit)
 
     def test_status_change(self):
-        self.login('apycotbot', password='apycot')
-        ex1 = self.add_execution('lgc', [('unittest', 'success'), ('coverage', 'success')])
-        ex2 = self.add_execution('lgc', [('unittest', 'failure'), ('coverage', 'success')])
-        ex3 = self.add_execution('lgc', [('unittest', 'success'), ('coverage', 'error')])
+        ex1 = self.lgc.start(self.lgce, check_duplicate=False)
+        ex2 = self.lgc.start(self.lgce, check_duplicate=False)
+        ex3 = self.lgc.start(self.lgce, check_duplicate=False)
+        self.commit()
+        self.login('narval', password='narval0')
+        self.dumb_execution(ex1, [('unittest', 'success'), ('coverage', 'success')])
+        self.dumb_execution(ex2, [('unittest', 'failure'), ('coverage', 'success')])
+        self.dumb_execution(ex3, [('unittest', 'success'), ('coverage', 'error')])
         self.commit()
 
         status_changes = ex2.status_changes()
@@ -100,22 +94,22 @@
                                            ex3.check_result_by_name('unittest').eid),
                               'coverage': (ex2.check_result_by_name('coverage').eid,
                                            ex3.check_result_by_name('coverage').eid)})
-
-    def test_branch_for_pe(self):
-        #check that branch defined in ProjectEnvironement are propertly retrieved
+    # XXX  to backport
+    # def test_branch_for_pe(self):
+    #     #check that branch defined in ProjectEnvironment is properly retrieved
 
-        data = {
-            'cc': self.lgce.check_config + '\nbranch=toto',
-            'e': self.lgce.eid,
+    #     data = {
+    #         'cc': self.lgce.check_config + '\nbranch=toto',
+    #         'e': self.lgce.eid,
 
-        }
-        self.execute('SET PE check_config %(cc)s WHERE PE eid %(e)s', data)
+    #     }
+    #     self.execute('SET PE check_config %(cc)s WHERE PE eid %(e)s', data)
 
-        entity = self.execute('Any PE WHERE PE eid %(e)s', data).get_entity(0,0)
+    #     entity = self.execute('Any PE WHERE PE eid %(e)s', data).get_entity(0,0)
 
-        repo_def = entity.apycot_repository_def
-        self.assertIn('branch', repo_def)
-        self.assertEquals(repo_def['branch'], 'toto')
+    #     repo_def = entity.apycot_repository_def
+    #     self.assertIn('branch', repo_def)
+    #     self.assertEquals(repo_def['branch'], 'toto')
 
 
 if __name__ == '__main__':
--- a/test/unittest_hooks.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/test/unittest_hooks.py	Fri Sep 10 14:14:42 2010 +0200
@@ -7,8 +7,7 @@
 from cubicweb import Binary
 
 
-from utils import ApycotBaseTC, proxy
-from cubes.apycot.hooks import start_test # load once bot_proxy has been monkey patched
+from utils import INPUTS_DIR, ApycotBaseTC
 
 
 def clean_str(string):
@@ -22,25 +21,33 @@
                                      address=u'admin@cubicweb.org',
                                      reverse_use_email=self.user())
 
+    def start_lgc_tests(self):
+        plan = self.lgc.start(self.lgce)
+        self.lgc._cw.cnx.commit()
+        return plan
+
     def test_exec_status_change(self):
-        self.login('apycotbot', password='apycot')
-        self.add_execution('lgc', [('unittest', 'success'), ('coverage', 'success')])
+        self.login('narval', password='narval0')
+        plan = self.start_lgc_tests()
+        self.dumb_execution(plan, [('unittest', 'success'), ('coverage', 'success')])
         self.assertEquals(len(MAILBOX), 0)
         self.commit()
         self.assertEquals(len(MAILBOX), 0)
-        self.add_execution('lgc', [('unittest', 'success'), ('coverage', 'success')])
+        plan = self.start_lgc_tests()
+        self.dumb_execution(plan, [('unittest', 'success'), ('coverage', 'success')])
         self.assertEquals(len(MAILBOX), 0)
         self.commit()
         self.assertEquals(len(MAILBOX), 0)
-        self.add_execution('lgc', [('unittest', 'failure'), ('coverage', 'failure')])
-        self.assertEquals(len(MAILBOX), 0)
+        plan = self.start_lgc_tests()
+        self.dumb_execution(plan, [('unittest', 'failure'), ('coverage', 'failure')])
+        self.assertEquals(len(MAILBOX), 0, MAILBOX)
         self.commit()
         self.assertEquals(len(MAILBOX), 1)
         self.assertEquals(MAILBOX[0].recipients, ['admin@cubicweb.org'])
         self.assertEquals(MAILBOX[0].message.get('Subject'),
-                          '[data] lgce / lgc now has 2 failure')
+                          '[data] lgce/lgc#default now has 2 failure')
         self.assertTextEquals(clean_str(MAILBOX[0].message.get_payload(decode=True)),
-                              '''The following changes occured between executions on branch None:
+                              '''The following changes occured between executions on branch default:
 
 * coverage status changed from success to failure
 * unittest status changed from success to failure
@@ -52,17 +59,19 @@
 
 
     def test_exec_one_status_change(self):
-        self.login('apycotbot', password='apycot')
-        self.add_execution('lgc', [('unittest', 'success'), ('coverage', 'success')])
+        self.login('narval', password='narval0')
+        plan = self.start_lgc_tests()
+        self.dumb_execution(plan, [('unittest', 'success'), ('coverage', 'success')])
         self.commit()
-        self.add_execution('lgc', [('unittest', 'failure')])
+        plan = self.start_lgc_tests()
+        self.dumb_execution(plan, [('unittest', 'failure')])
         self.commit()
         self.assertEquals(len(MAILBOX), 1)
         self.assertEquals(MAILBOX[0].recipients, ['admin@cubicweb.org'])
         self.assertEquals(MAILBOX[0].message.get('Subject'),
-                          '[data] lgce / lgc: success -> failure (unittest)')
+                          '[data] lgce/lgc#default: success -> failure (unittest)')
         self.assertTextEquals(clean_str(MAILBOX[0].message.get_payload(decode=True)),
-                              '''The following changes occured between executions on branch None:
+                              '''The following changes occured between executions on branch default:
 
 * unittest status changed from success to failure
 
@@ -72,14 +81,15 @@
 URL: http://testing.fr/cubicweb/projectenvironment/lgce/lgc/<EID>''')
 
 
+HGREPO = os.path.join(INPUTS_DIR, u'hgrepo')
 def setup_module(*args, **kwargs):
-    if not os.path.exists('data/hgrepo'):
-        os.mkdir('data/hgrepo')
-        os.system('cd data/hgrepo; hg init;')
+    if not os.path.exists(HGREPO):
+        os.mkdir(HGREPO)
+        os.system('cd %s && hg init' % HGREPO)
 
 def teardown_module(*args, **kwargs):
-    if os.path.exists('data/hgrepo'):
-        os.system('rm -rf data/hgrepo')
+    if os.path.exists(HGREPO):
+        os.system('rm -rf %s' % HGREPO)
 
 
 class StartTestTC(ApycotBaseTC):
@@ -94,24 +104,27 @@
             require_group=managers)
 
     def test_new_vc_trigger(self):
+        self.skip('XXX need update')
         self.lgc.set_attributes(start_mode=u'on new revision')
         lgc2 = self.add_test_config(u'lgc2', start_mode=u'manual', env=self.lgce)
         lgc3 = self.add_test_config(u'lgc3', start_mode=u'on new revision',
                                     check_config=u'branch=stable', env=self.lgce)
         lgce2 = self.request().create_entity(
             'ProjectEnvironment', name=u'lgce2',
-            check_preprocessors=u'install=setup_install',
-            vcs_repository_type=u'hg',
-            vcs_repository=u'http://www.logilab.org/src/logilab/common',
+            check_config=u'install=setup_install',
             vcs_path=u'dir1',
             )
+        req = self.request()
+        req.create_entity('Repository', type=u'mercurial',
+                          source_url=u'http://www.logilab.org/src/logilab/common',
+                          reverse_local_repository=lgce2)
         lgc4 = self.add_test_config(u'lgc4', start_mode=u'on new revision',
                                     check_config=u'branch=default',
                                     env=lgce2, start_rev_deps=True)
         lgc5 = self.add_test_config(u'lgc5', start_mode=u'manual',
                                     env=lgce2)
         self.commit()
-        r = self.request().create_entity('Repository', path=u'data/hgrepo', type=u'mercurial', encoding=u'utf8')
+        r = self.request().create_entity('Repository', path=HGREPO, type=u'mercurial', encoding=u'utf8')
         self.execute('SET PE local_repository R WHERE PE is ProjectEnvironment, R is Repository')
         self.grant_write_perm(r, 'managers')
         self.commit()
@@ -143,19 +156,22 @@
                                                             'start_rev_deps': False})])
 
     def test_datetime_trigger(self):
+        self.skip('XXX need update')
         self.lgc.set_attributes(start_mode=u'hourly')
         lgc2 = self.add_test_config(u'lgc2', start_mode=u'hourly', env=self.lgce)
-        lgce2 = self.request().create_entity(
+        req = self.request()
+        lgce2 = req.create_entity(
             'ProjectEnvironment', name=u'lgce2',
-            check_preprocessors=u'install=setup_install',
-            vcs_repository_type=u'hg',
-            vcs_repository=u'http://www.logilab.org/src/logilab/common',
+            check_config=u'install=setup_install',
             vcs_path=u'dir1',
             )
+        req.create_entity('Repository', type=u'mercurial',
+                          source_url=u'http://www.logilab.org/src/logilab/common',
+                          reverse_local_repository=lgce2)
         lgc3 = self.add_test_config(u'lgc3', start_mode=u'hourly',
                                     check_config=u'branch=default',
                                     env=lgce2, start_rev_deps=True)
-        r = self.request().create_entity('Repository', path=u'data/hgrepo', type=u'mercurial', encoding=u'utf8')
+        r = req.create_entity('Repository', path=HGREPO, type=u'mercurial', encoding=u'utf8')
         self.grant_write_perm(r, 'managers')
         r.vcs_add(u'dir1', u'tutu.png', Binary('data'))
         self.execute('SET PE local_repository R WHERE PE name "lgce2"')
@@ -176,8 +192,8 @@
                           set((('lgce', 'lgc', False, None),
                                ('lgce2', 'lgc3', True, 'default')))
                           )
-        self.login('apycotbot', password='apycot')
-        ex = self.add_execution('lgc3', (), setend=True)
+        self.login('narval', password='narval0')
+        ex = self.dumb_execution('lgc3', (), setend=True)
         ex.set_relations(using_revision=r.branch_head())
         self.commit()
         self.restore_connection()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_parser.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,103 @@
+import logging
+
+from logilab.common.testlib import TestCase, unittest_main
+
+import utils # import this first
+
+from apycotlib import SimpleOutputParser
+from apycotlib.writer import AbstractLogWriter
+from apycotlib.checkers.jslint import JsLintParser
+
+
+class SimpleLogWriter(AbstractLogWriter):
+    """Simple writer only able to handle log/debug/info/warning/error/fatal methods"""
+
+    def __init__(self):
+        self.messages = []
+
+    def _log(self, severity, path, line, msg):
+        self.messages.append((severity, path, line, msg))
+
+class ParserTC(TestCase):
+    parser_class = None
+
+    def validate_parser(self, text_input, msg_output, *args, **kwargs):
+        fake_stream = text_input.splitlines(True)
+        writer = SimpleLogWriter()
+        assert self.parser_class is not None, 'No parser_class defined'
+        parser = self.parser_class(writer, *args, **kwargs)
+        parser.parse(fake_stream)
+        self.assertListEquals(writer.messages, msg_output)
+        return parser
+
+class SimpleOutputParserTC(ParserTC):
+
+    parser_class = SimpleOutputParser
+
+    def test_input(self):
+        text_input = """
+E:toto bob
+W:Warning toto
+W:machin
+F:Urg arg
+"""
+        msg_output = [
+            (logging.ERROR, None, None, 'toto bob'),
+            (logging.WARNING, None, None, 'Warning toto'),
+            (logging.WARNING, None, None, 'machin'),
+            (logging.FATAL, None, None, 'Urg arg'),
+        ]
+        parser = self.validate_parser(text_input, msg_output)
+
+class JsLintParserTC(ParserTC):
+
+    parser_class = JsLintParser
+
+    def test_input(self):
+        text_input = """
+Lint at line 8 character 1: 'CubicWeb' is not defined.
+CubicWeb.require('htmlhelpers.js');
+
+Lint at line 8 character 6: Expected 'updateMessage' to have an indentation at 9 instead at 6.
+updateMessage(_("bookmark has been removed"));
+
+Lint at line 8 character 6: 'updateMessage' is not defined.
+updateMessage(_("bookmark has been removed"));
+
+Lint at line 8 character 20: Unexpected dangling '_' in '_'.
+updateMessage(_("bookmark has been removed"));
+
+Lint at line 8 character 20: '_' is not defined.
+updateMessage(_("bookmark has been removed"));
+
+Lint at line 8 character 1: Mixed spaces and tabs.
+updateMessage(_("bookmark has been removed"));
+
+Lint at line 41 character 23: Missing space after 'function'.
+tabbable: function(a, i, m) {
+
+Lint at line 42 character 2: Confusing plusses.
+++a;
+
+
+Lint at line 45 character 8: Expected an operator and instead saw 'in'.
+singing in the rain
+
+"""
+        msg_output = [
+            (logging.ERROR,   'toto.js', u'8:1', u"'CubicWeb' is not defined."),
+            (logging.WARNING, 'toto.js', u'8:6',
+                u"Expected 'updateMessage' to have an indentation at 9 instead at 6."),
+            (logging.ERROR,   'toto.js', u'8:6', u"'updateMessage' is not defined."),
+            (logging.ERROR,   'toto.js', u'8:20', u"Unexpected dangling '_' in '_'."),
+            (logging.ERROR,   'toto.js', u'8:20', u"'_' is not defined."),
+            (logging.ERROR,   'toto.js', u'8:1', u'Mixed spaces and tabs.'),
+            (logging.ERROR,   'toto.js', u'41:23', u"Missing space after 'function'."),
+            (logging.INFO,    'toto.js', u'42:2', u'Confusing plusses.'),
+            (logging.WARNING, 'toto.js', u'45:8', u"Expected an operator and instead saw 'in'."),
+        ]
+        parser = self.validate_parser(text_input, msg_output, path='toto.js')
+
+
+if __name__ == '__main__':
+    unittest_main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_repositories.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,62 @@
+"""unit tests for apycot.repositories"""
+
+from logilab.common import testlib
+
+from copy import copy
+
+from utils import MockVCSFile as VCSFile
+
+from apycotlib.repositories import *
+
+
+class GetRepositoryTC(testlib.TestCase):
+    def test(self):
+        vcsfile = VCSFile('mercurial', source_url='http://www.labas.org')
+        repo = get_repository({'repository': vcsfile, 'path': 'toto'})
+        self.assert_(isinstance(repo, HGRepository))
+        vcsfile = VCSFile('subversion', source_url='file://toto')
+        repo = get_repository({'repository': vcsfile})
+        self.assert_(isinstance(repo, SVNRepository))
+
+class SVNRepositoryTC(testlib.TestCase):
+    _tested_class = SVNRepository
+    name = 'subversion'
+
+    def test_co_command(self):
+        vcsfile = VCSFile('subversion', source_url='file://test')
+        repo_def = {'repository':vcsfile, 'path': 'path'}
+        repo = SVNRepository(repo_def)
+        self.assertEquals(repo_def, {})
+        self.assertEquals(repo.co_command(),
+                          'svn checkout --non-interactive -q file://test/path')
+        repo_def = {'repository': vcsfile, 'path': 'path', 'branch':'branch'}
+        repo = SVNRepository(repo_def)
+        self.assertEquals(repo_def, {})
+        self.assertEquals(repo.co_command(),
+                          'svn checkout --non-interactive -q file://test/branch/path')
+
+    def test_co_path(self):
+        vcsfile = VCSFile('subversion', source_url='http://test.logilab.org/svn')
+        repo = SVNRepository({'repository':vcsfile, 'path':'toto/path'})
+        self.assertEquals(repo.co_path, 'path')
+
+    def test_specials(self):
+        vcsfile = VCSFile('subversion', source_url='test')
+        repo = SVNRepository({'repository':vcsfile, 'path': 'toto/path'})
+        self.assertEquals(repr(repo), 'subversion:test/toto/path')
+        self.assert_(repo == copy(repo))
+        vcsfile = VCSFile('subversion', source_url='test')
+        repo2 = SVNRepository({'repository':vcsfile, 'path': 'tutu/path'})
+        self.assert_(not repo == repo2)
+
+class HGRepositoryTC(testlib.TestCase):
+    def test_co_path(self):
+        vcsfile = VCSFile('mercurial', path='toto/path')
+        repo = HGRepository({'repository':vcsfile, 'path': 'common'})
+        self.assertEquals(repo.co_path, 'path/common')
+        vcsfile = VCSFile('mercurial', path='toto/path')
+        repo = HGRepository({'repository': vcsfile, 'path': 'common/sub'})
+        self.assertEquals(repo.co_path, 'path/common/sub')
+
+if __name__ == '__main__':
+    testlib.unittest_main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_task.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,469 @@
+import shutil
+import tarfile
+import sys
+import os
+from os.path import exists, join, abspath
+
+from logilab.common.testlib import TestCase, unittest_main, mock_object, within_tempdir
+
+# import this first will set import machinery on
+from utils import MockTestWriter, MockRepository, MockConnection, MockVCSFile, input_path
+
+from apycotlib import SetupException
+from apycotlib import SUCCESS, FAILURE, PARTIAL, SKIPPED
+from apycotlib.atest import Test as BaseTest
+from apycotlib.repositories import SVNRepository
+
+def Test(tconfig, *args, **kwargs):
+    pps = kwargs.pop('preprocessors', None)
+    checkers = kwargs.pop('checkers', None)
+    repo = kwargs.pop('repo', MOCKREPO)
+    environment = kwargs.pop('environment',
+                             Environment(eid=tconfig.name, apycot_preprocessors={}))
+    texec = mock_object(configuration=tconfig, environment=environment,
+                        branch=environment.conf.get('branch'))
+    test = BaseTest(texec, *args, **kwargs)
+    if pps is not None:
+        test.apycot_preprocessors = lambda x: pps
+    if checkers is not None:
+        test.checkers = checkers
+    if repo is not None:
+        test._repositories[environment.eid] = repo
+    return test
+
+class TestConfig:
+    def __init__(self, name, dependencies=(), environ=None, conf=None):
+        self.name = self.eid = name
+        #self._repo = repo
+        self._dependencies = dependencies
+        self._environ = environ or {'ZIGUOUIGOUI': 'YOOOO'}
+        self.all_checks = ()
+        if conf is None:
+            conf = {}
+        self._configuration = conf
+
+    def apycot_process_environment(self):
+        return self._environ
+    def apycot_configuration(self, pe):
+        return self._configuration.copy()
+    def dependencies(self):
+        return self._dependencies
+
+class Environment(object):
+    def __init__(self, eid, apycot_preprocessors={},
+                 repository=MockVCSFile('mercurial',
+                                     source_url='http://bob.org/hg/toto/'),
+                 conf=None):
+        self.eid = eid
+        self.apycot_preprocessors = apycot_preprocessors
+        self.repository = repository
+        if conf is None:
+            conf = {}
+        self.conf = conf
+        self.name=''
+        self.vcs_path = ''
+
+    def apycot_configuration(self):
+        return self.conf
+    def apycot_process_environment(self):
+        return {}
+
+# mock objects ################################################################
+
+class CleanRaisePreprocessor:
+    id = 'clean_raise_preprocessor'
+    def match(self, name):
+        return 1
+
+    def run(self, test, path=None):
+        if path is None:
+            return 1
+        else: 
+            return 0
+
+class SetupRaisePreprocessor:
+    id = 'setup_raise_preprocessor'
+    def match(self, name):
+        return 1
+
+    def run(self, test, path=None):
+        if path is None:
+            raise SetupException('in test_preprocessor.test_setup')
+        else:
+            raise SetupException('%s failed on %r' % (self.id, path))
+
+class TouchTestPreprocessor:
+    id = 'touch_preprocessor'
+    file = None
+    file2 = None
+
+    def run(self, test, path=None):
+        self.file = join(test.tmpdir, 'TestTC_pp')
+        self.file2 = join(test.tmpdir, 'TestTC2_pp')
+        f = open(self.file, 'w')
+        f.close()
+        f = open(self.file2, 'w')
+        f.close()
+
+class SimplePreprocessor(object):
+
+    id = 'simple_preprocessor'
+    def __init__(self):
+        self.processed    = {}
+
+    def run(self, test, path=None):
+        if path == None:
+            path = test.project_path()
+        self.processed.setdefault(path, 0)
+        self.processed[path] += 1
+
+class DummyTest(object):
+    need_preprocessor = None
+
+    def check(self, test, writer):
+        return SUCCESS
+    def check_options(self):
+        pass
+
+class SuccessTestChecker(DummyTest):
+    id = 'success_test_checker'
+    options = {}
+    need_preprocessor = 'install'
+    def check(self, test, writer):
+        return SUCCESS
+
+class FailureTestChecker(DummyTest):
+    id = 'failure_test_checker'
+    options = {}
+    def check(self, test, writer):
+        return FAILURE
+
+class ErrorTestChecker(DummyTest):
+    id = 'error_test_checker'
+    options = {}
+    def check(self, test, writer):
+        raise Exception('never succeed!')
+
+
+# real tests ##################################################################
+
+# manage temporary repo
+def setup_module(*args):
+    for repo in ('badpkg2_svn', 'goodpkg_svn'):
+        path = input_path(repo)
+        if exists(path):
+            shutil.rmtree(path)
+        tarfile.open(input_path('%s.tar.gz' % repo), 'r|gz').extractall(input_path(''))
+
+def teardown_module(*args):
+    if input_path is None: # import error
+        return
+    for repo in ('badpkg2', 'goodpkg'):
+        path = input_path(repo)
+        if exists(path):
+            shutil.rmtree(path)
+
+
+MOCKVCSFILE = MockVCSFile('subversion', path='/home/cvs')
+MOCKREPO = MockRepository(repository=MOCKVCSFILE,
+                          path='soft/goodpkg',
+                          command='cp -R %s .' % input_path('goodpkg'))
+BADREPO = MockRepository(repository=MOCKVCSFILE,
+                         path='soft/goodpkg', command='false')
+SVNREPO1 = SVNRepository({'repository': MockVCSFile('subversion',
+                                                    source_url='file://%s' % input_path('goodpkg'))})
+SVNREPO2 = SVNRepository({'repository': MockVCSFile('subversion',
+                                                    source_url='file://%s' % input_path('badpkg2'))})
+CONN = MockConnection()
+TPP = TouchTestPreprocessor()
+
+class TestTC(TestCase):
+
+    @within_tempdir
+    def test_setup_installed(self):
+        self.skip('to be done by pyves')
+        pp = SimplePreprocessor()
+        test = Test(TestConfig('yo', dependencies=(Environment('pypasax'),)),
+                    MockTestWriter(), {},
+                    checkers=[SuccessTestChecker()],
+                    preprocessors={'install': pp})
+        test._repositories['pypasax'] = SVNREPO2
+        test.setup()
+        self.assertEquals(pp.processed, {'soft/goodpkg': 1, 'badpkg2': 1})
+
+
+    def test_setup_no_install(self):
+        self.skip('to be done by pyves')
+        test = Test(TestConfig('yo', dependencies=(Environment('pypasax'),)),
+                    MockTestWriter(), {},
+                    preprocessors={'install': TPP})
+        test._repositories['pypasax'] = SVNREPO2
+        # no checks requiring installation, main repo should be checked out though not installed,
+        # and dependencies shouldn't be installed
+        try:
+            test.setup()
+            self.assertEquals(os.environ['ZIGUOUIGOUI'], 'YOOOO')
+            self.failUnless(exists('goodpkg'))
+            self.failIf(exists('badpkg2'))
+            self.failUnless(TPP.file is None or not exists(TPP.file))
+        finally:
+            os.environ.pop('ZIGUOUIGOUI', None)
+            if exists('goodpkg'):
+                shutil.rmtree('goodpkg')
+
+    def test_python_setup(self):
+        self.skip('to be done by pyves')
+        test = Test(TestConfig('yo', dependencies=(Environment('pypasax'),)),
+                    MockTestWriter(), {},
+                    checkers=[SuccessTestChecker()],
+                    preprocessors={'install': TPP})
+        test._repositories['pypasax'] = SVNREPO2
+        try:
+            test.setup()
+            tconfig = test.tconfig
+            projectenvs = (tconfig.environment,) + tconfig.dependencies()
+            for pe in projectenvs:
+                test.checkout(pe)
+            self.assertEquals(os.environ['ZIGUOUIGOUI'], 'YOOOO')
+            self.failUnless(exists('goodpkg'))
+            self.failUnless(exists('badpkg2'))
+            self.failUnless(exists(TPP.file))
+        finally:
+            if exists('goodpkg'):
+                shutil.rmtree('goodpkg')
+            if exists('badpkg2'):
+                shutil.rmtree('badpkg2')
+            if TPP.file is not None and exists(TPP.file):
+                os.remove(TPP.file)
+            if TPP.file2 is not None and exists(TPP.file2):
+                os.remove(TPP.file2)
+            del os.environ['ZIGUOUIGOUI']
+
+    @within_tempdir
+    def _test_setup_ex(self, test, msg=None):
+        self.skip('to be done by pyves')
+        try:
+            test.setup()
+            self.failUnless(test._failed_pp, 'Preprocessors should have failed')
+        except SetupException, ex:
+            if msg:
+                self.assertEquals(str(ex), msg)
+
+    def test_setup_raise(self):
+        self.skip('to be done by pyves')
+        # test bad checkout command
+        test = Test(TestConfig('yo'), MockTestWriter(), {}, repo=BADREPO)
+        self._test_setup_ex(test, "`false` returned with status : 1")
+        # test bad dependencies checkout
+        test = Test(TestConfig('yo', dependencies=(Environment('toto'),),
+                               ),
+                    MockTestWriter(), {},
+                    checkers=[SuccessTestChecker()])
+        test._repositories['toto'] = BADREPO
+        self._test_setup_ex(test)
+        # test bad preprocessing
+        test = Test(TestConfig('yo', dependencies=(Environment('pypasax'),),
+                               ),
+                    MockTestWriter(), {},
+                    checkers=[SuccessTestChecker()],
+                    preprocessors={'install': SetupRaisePreprocessor()})
+        test._repositories['pypasax'] = SVNREPO2
+        self._test_setup_ex(test)
+
+    def test_clean(self):
+        self.skip("We don't execute whole test anymore")
+        test = Test(TestConfig('yo', dependencies=(Environment('pypasax'),),
+                               ),
+                    MockTestWriter(), {},
+                    checkers=[SuccessTestChecker()],
+                    preprocessors={'install': TPP})
+        test._repositories['pypasax'] = SVNREPO2
+        # clean should never fail
+        # but most interesting things occurs after setup...
+        test.execute()
+        self.assertNotNone(TPP.file, "Preprocessors have not been run")
+        self.failIf(exists('goodpkg'))
+        self.failIf(exists('badpkg2'))
+        self.failIf(exists(TPP.file))
+
+    def test_execute_1(self):
+        self.skip("We don't execute whole test anymore")
+        test = Test(TestConfig('yo', dependencies=(Environment('Pypasax'),),
+                               ),
+                    MockTestWriter(), {}, repo=SVNREPO1,
+                    checkers=[SuccessTestChecker(), FailureTestChecker(), ErrorTestChecker()],
+                    preprocessors={'install': TPP})
+        test._repositories['Pypasax'] = SVNREPO2
+        test.execute()
+        self.assertNotNone(TPP.file)
+        self.failIf(exists(TPP.file))
+        self.assertNotNone(TPP.file2)
+        self.failIf(exists(TPP.file2))
+
+    def test_execute_2(self):
+        self.skip("We don't execute whole test anymore")
+        test = Test(TestConfig('yo', dependencies=(Environment('Pypasax', SVNREPO2),),
+                                     ),
+                    MockTestWriter(), {}, repo=SVNREPO1,
+                    checkers=[SuccessTestChecker(), FailureTestChecker(), ErrorTestChecker()],
+                    preprocessors={'install:': SetupRaisePreprocessor()})
+        test._repositories['Pypasax'] = SVNREPO2
+        test.execute()
+
+    def test_execute_0(self):
+        self.skip("We don't execute whole test anymore")
+        command = 'cp -R '+abspath('inputs/goodpkg')+' .'
+        cwd = os.getcwd()
+        test = Test(TestConfig('yo', dependencies=(Environment('Pypasax'),)),
+                    MockTestWriter(), {})
+        test._repositories['Pypasax'] = SVNREPO2
+        self.failUnless(exists(test.tmpdir))
+        test.execute()
+        self.failIf(exists(test.tmpdir))
+        self.assertEquals(os.getcwd(), cwd)
+
+        test = Test(TestConfig('yo', dependencies=(Environment('Pypasax'),)),
+                    MockTestWriter(), {'keep-test-dir':1},)
+        test._repositories['Pypasax'] = SVNREPO2
+        self.failUnless(exists(test.tmpdir))
+        test.execute()
+        self.failIf(exists(test.tmpdir))
+        self.assertEquals(os.getcwd(), cwd)
+
+        test = Test(TestConfig('yo', dependencies=(Environment('Pypasax'),),
+                                     ),
+                    MockTestWriter()(), {}, repo=SVNREPO1,
+                    checkers=[SuccessTestChecker()],
+                    preprocessors={'install': SetupRaisePreprocessor()})
+        test._repositories['Pypasax'] = SVNREPO2
+        self.failUnless(exists(test.tmpdir))
+        test.execute()
+        self.failIf(exists(test.tmpdir))
+        self.assertEquals(os.getcwd(), cwd)
+
+        test = Test(TestConfig('yo', dependencies=(Environment('Pypasax'),),
+                                     ),
+                    MockTestWriter()(), {}, repo=SVNREPO1,
+                    checkers=[SuccessTestChecker()],
+                    preprocessors={'install': CleanRaisePreprocessor()})
+        test._repositories['Pypasax'] = SVNREPO2
+        self.failUnless(exists(test.tmpdir))
+        test.execute()
+        self.failIf(exists(test.tmpdir))
+        self.assertEquals(os.getcwd(), cwd)
+
+    def test_branch(self):
+        test = Test(TestConfig('yo'),
+                    MockTestWriter(), {},
+                    environment=Environment('babar', conf={'branch': 'bob'}),
+                    checkers=[SuccessTestChecker()])
+        del test._repositories['babar'] # XXX clean up this mess
+        repo = test.apycot_repository()
+        self.assertEquals(repo.branch, 'bob')
+
+    def test_branch_deps_with_branch(self):
+        dep = Environment('babar', conf={'branch': 'Onk'})
+        test = Test(TestConfig('yo', dependencies=(dep, )),
+                    MockTestWriter(), {},
+                    checkers=[SuccessTestChecker()])
+        repo = test.apycot_repository(dep)
+        self.assertEquals(repo.branch, 'Onk')
+
+    def test_branch_deps_without_branch(self):
+        dep = Environment('babar')
+        test = Test(TestConfig('yo', dependencies=(dep, )),
+                    MockTestWriter(), {},
+                    checkers=[SuccessTestChecker()])
+        repo = test.apycot_repository(dep)
+        # default should be the branch name (as none is defined)
+        self.assertEquals(repo.branch, 'default')
+
+
+class EnvironmentTrackerMixinTC(TestCase):
+
+    def setUp(self):
+        os.environ['PYTHONPATH'] = ''
+        self.tracker = Test(TestConfig('yo', dependencies=(Environment('pypasax'),)),
+                            MockTestWriter(), {})
+
+    def test_update_clean_env(self):
+        lc_all = os.environ.get('LC_ALL')
+        self.tracker.update_env('key', 'LC_ALL', 'XXXX')
+        self.assertEquals(os.environ['LC_ALL'], 'XXXX')
+        self.tracker.clean_env('key', 'LC_ALL')
+        self.assertEquals(os.environ.get('LC_ALL'), lc_all)
+
+        self.tracker.update_env('key', '__ENVIRONMENTTRACKERMIXINTC__', 'XXXX')
+        self.assertEquals(os.environ['__ENVIRONMENTTRACKERMIXINTC__'], 'XXXX')
+        self.tracker.clean_env('key', '__ENVIRONMENTTRACKERMIXINTC__')
+        self.assertRaises(KeyError, os.environ.__getitem__,
+                          '__ENVIRONMENTTRACKERMIXINTC__')
+
+    def test_nested(self):
+        lc_all = os.environ.get('LC_ALL')
+        self.tracker.update_env('key', 'LC_ALL', 'XXXX')
+        self.assertEquals(os.environ['LC_ALL'], 'XXXX')
+        self.tracker.update_env('key2', 'LC_ALL', 'YYYY')
+        self.assertEquals(os.environ['LC_ALL'], 'YYYY')
+        self.tracker.clean_env('key2', 'LC_ALL')
+        self.assertEquals(os.environ['LC_ALL'], 'XXXX')
+        self.tracker.clean_env('key', 'LC_ALL')
+        self.assertEquals(os.environ.get('LC_ALL'), lc_all)
+
+    def test_update_clean_env_sep(self):
+        path = os.environ['PATH']
+        self.tracker.update_env('key', 'PATH', '/mybin', ':')
+        self.assertEquals(os.environ['PATH'], '/mybin:' + path)
+        self.tracker.clean_env('key', 'PATH')
+        self.assertEquals(os.environ['PATH'], path)
+
+    def test_nested_sep(self):
+        path = os.environ['PATH']
+        self.tracker.update_env('key', 'PATH', '/mybin', ':')
+        if path:
+            self.assertEquals(os.environ['PATH'], '/mybin:' + path)
+        else:
+            self.assertEquals(os.environ['PATH'], '/mybin')
+        self.tracker.update_env('key2', 'PATH', '/myotherbin', ':')
+        if path:
+            self.assertEquals(os.environ['PATH'], '/myotherbin:/mybin:' + path)
+        else:
+            self.assertEquals(os.environ['PATH'], '/myotherbin:/mybin')
+        self.tracker.clean_env('key2', 'PATH')
+        if path:
+            self.assertEquals(os.environ['PATH'], '/mybin:' + path)
+        else:
+            self.assertEquals(os.environ['PATH'], '/mybin')
+        self.tracker.clean_env('key', 'PATH')
+        self.assertEquals(os.environ['PATH'], path)
+
+    def test_python_path_sync(self):
+        self.tracker.update_env('key', 'PYTHONPATH', '/mylib', ':')
+        self.assertEquals(os.environ['PYTHONPATH'], '/mylib')
+        self.assertEquals(sys.path[0], '/mylib')
+        self.tracker.update_env('key2', 'PYTHONPATH', '/otherlib', ':')
+        self.assertEquals(os.environ['PYTHONPATH'], '/otherlib:/mylib')
+        self.assertEquals(sys.path[0], '/otherlib')
+        self.tracker.clean_env('key2', 'PYTHONPATH')
+        self.assertEquals(os.environ['PYTHONPATH'], '/mylib')
+        self.assertNotEquals(sys.path[0], '/otherlib')
+        self.tracker.clean_env('key', 'PYTHONPATH')
+        self.assertEquals(os.environ['PYTHONPATH'], '')
+        self.assertNotEquals(sys.path[0], '/otherlib')
+
+    def test_update_undefined_env(self):
+
+        var = 'XNZOUACONFVESUHFJGSLKJ'
+        while os.environ.get(var) is not None:
+            var = ''.join(chr(randint(ord('A'), ord('Z') +1))
+                for cnt in xrange(randint(10, 20)))
+
+        self.tracker.update_env('key', var, 'to be or not to be', ':')
+        self.assertTextEquals(os.environ.get(var),  'to be or not to be')
+        self.tracker.clean_env('key', var)
+        self.assertEquals(os.environ.get(var), None)
+
+
+
+if __name__ == '__main__':
+    unittest_main()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_writer.py	Fri Sep 10 14:14:42 2010 +0200
@@ -0,0 +1,39 @@
+"""unit tests for the apycotlib.writer module"""
+import os
+from logilab.common.testlib import TestCase, unittest_main
+
+import utils
+
+from apycotlib.writer import BaseDataWriter
+
+
+class BaseDataWriterTC(TestCase):
+
+    def setUp(self):
+        self.writer = BaseDataWriter(None, 1)
+
+    def test__msg_info_01(self):
+        path, line, msg = self.writer._msg_info('bonjour %s', 'vous')
+        self.assertEquals(path, None)
+        self.assertEquals(line, None)
+        self.assertEquals(msg, 'bonjour vous')
+
+    def test__msg_info_02(self):
+        path, line, msg = self.writer._msg_info('bonjour %s', 'vous', path='/tmp', line=1)
+        self.assertEquals(path, '/tmp')
+        self.assertEquals(line, 1)
+        self.assertEquals(msg, 'bonjour vous')
+
+    def test__msg_info_03(self):
+        try:
+            os.path.isdir(1)
+        except:
+            path, line, msg = self.writer._msg_info('oops %s', 'badaboum', tb=True)
+        self.assertEquals(path, None)
+        self.assertEquals(line, None)
+        self.failUnless(msg.startswith('oops badaboum'))
+        self.failUnless('Traceback' in msg)
+
+
+if __name__ == '__main__':
+    unittest_main()
--- a/test/utils.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/test/utils.py	Fri Sep 10 14:14:42 2010 +0200
@@ -1,20 +1,131 @@
+import shutil
+from datetime import datetime
+from os.path import join, dirname, abspath
+
+from logilab.common.testlib import mock_object
+
 from cubicweb.devtools.testlib import CubicWebTC
-from datetime import datetime
 
 from cubes.apycot import entities
 
-class MockProxy(object):
+from apycotlib.writer import CheckDataWriter, BaseDataWriter
+
+INPUTS_DIR = abspath(join(dirname(__file__), 'data'))
+
+def input_path(file=''):
+    return join(INPUTS_DIR, file)
+
+
+class DummyStack(object):
+
+    def __init__(self):
+        self.msg = None
+        self.clear()
+
+    def __getitem__(self, idx):
+        return self
+
+    def __len__(self):
+        return 0
+
+    def clear(self):
+        self.msg = []
+        self.append = self.msg.append
+
+
+class MockBaseWriter(BaseDataWriter):
+
     def __init__(self):
-        self.queued = []
-    def queue_task(self, *args, **kwargs):
-        self.queued.append( (args, kwargs) )
-    def get_archive(self, *args, **kwargs):
-        return 'hop'
+        super(MockBaseWriter, self).__init__( MockConnection, None)
+
+    def skip(self, *args, **kwargs):
+        pass
+
+    def _debug(self, *args, **kwargs):
+        print args, kwargs
+
+    def set_exec_status(self, status):
+        self._logs.append('<internal> SETTING EXEC STATUS: %s' % status)
+
+    raw = execution_info = skip
+    close = skip
+
+
+class MockTestWriter(MockBaseWriter):
+    """fake apycot.IWriter class, ignore every thing"""
+
+    def make_check_writer(self):
+        return MockCheckWriter()
+
+    link_to_revision = MockBaseWriter.skip
+
+
+class MockCheckWriter(MockBaseWriter):
+    """fake apycot.IWriter class, ignore every thing"""
+
+    def start(self, checker):
+        self._logs.append('<internal>STARTING %s' % checker.id)
+
+    def clear_writer(self):
+        self._log_stack = DummyStack()
+
+
+class MockTest(object):
+    """fake apycot.Test.Test class"""
+    def __init__(self, repo=None):
+        self.repo = repo
+        self.tmpdir = 'data'
+        self.environ = {}
+        self.checkers = []
+
+    def project_path(self, subpath=False):
+        return self.repo.co_path
 
-proxy = MockProxy()
-def bot_proxy(config, cache):
-    return proxy
-entities.bot_proxy = bot_proxy
+    @property
+    def tconfig(self):
+        return mock_object(testconfig={}, name='bob', subpath=None)
+
+    def apycot_config(self, something=None):
+        return {}
+
+
+class MockVCSFile:
+    def __init__(self, _type, source_url=None, path=None):
+        self.source_url = source_url
+        self.path = path
+        self.type = _type
+        self.local_cache = None
+
+
+class MockRepository:
+    """fake apycot.IRepository class"""
+    branch = None
+    def __init__(self, attrs=None, **kwargs):
+        self.__dict__.update(kwargs)
+        self.co_path = self.path
+
+    def co_command(self):
+        return self.command
+
+    def co_move_to_branch_command(self):
+        return None
+
+    def __repr__(self):
+        return '<MockRepository %r>' % self.__dict__
+
+    def revision(self):
+        pass
+
+
+class MockConnection(object):
+    """fake pyro connexion"""
+    def close(self):
+        pass
+    def execute(*args, **kwargs):
+        pass
+    def commit(self):
+        pass
+
 
 class ApycotBaseTC(CubicWebTC):
 
@@ -22,47 +133,43 @@
         req = self.request()
         self.lgce = req.create_entity(
             'ProjectEnvironment', name=u'lgce',
-            check_preprocessors=u'install=setup_install',
-            vcs_repository_type=u'hg',
-            vcs_repository=u'http://www.logilab.org/src/logilab/common',
-            check_config=u'env-option=value'
+            check_config=u'install=setup_install\nenv-option=value'
             )
-        self.pyp = req.create_entity('TestConfigGroup', name=u'PYTHONPACKAGE',
-                                   checks=u'python_pkg,pkg_doc,python_syntax,'
-                                   'python_lint,python_unittest,python_test_coverage',
-                                   check_config=u'python_lint_treshold=7\n'
-                                   'python_lint_ignore=thirdparty\n'
-                                   'python_test_coverage_treshold=70\n')
+        self.vcsrepo = req.create_entity('Repository', type=u'mercurial',
+                                         source_url=u'http://www.logilab.org/src/logilab/common',
+                                         reverse_local_repository=self.lgce)
+        self.pyp = req.create_entity('TestConfig', name=u'PYTHONPACKAGE',
+                                     check_config=u'python_lint_treshold=7\n'
+                                     'python_lint_ignore=thirdparty\n'
+                                     'python_test_coverage_treshold=70\n')
         self.lgc = self.add_test_config(u'lgc', env=self.lgce, group=self.pyp)
+        self.recipe = req.create_entity('Recipe', name=u'functest.noop',
+                                        reverse_use_recipe=self.lgc)
+        step1 = self.recipe.add_step(u'action', u'basic.noop', initial=True)
+        step2 = self.recipe.add_step(u'action', u'basic.noop', final=True)
+        tr = self.recipe.add_transition(step1, step2)
+
         self.repo.threaded_task = lambda func: func() # XXX move to cw
 
-
-    def add_test_config(self,
-                        name, checks=u'python_lint,python_unittest,python_test_coverage',
+    def add_test_config(self, name,
                         check_config=u'python_lint_treshold=8\npouet=5',
                         env=None, group=None, **kwargs):
         """add a TestConfig instance"""
         req = self.request()
-        tc = req.create_entity('TestConfig', name=name, checks=checks,
-                               check_config=check_config, **kwargs)
         if group is not None:
-            tc.set_relations(use_group=group)
+            kwargs['refinement_of'] = group
         if env is not None:
-            tc.set_relations(use_environment=env)
-        return tc
+            kwargs['use_environment'] = env
+        return req.create_entity('TestConfig', name=name,
+                                 check_config=check_config, **kwargs)
 
-
-    def add_execution(self, confname, check_defs, setend=True):
+    def dumb_execution(self, ex, check_defs, setend=True):
         """add a TestExecution instance"""
         req = self.request()
-        ex = req.create_entity('TestExecution', starttime=datetime.now())
-        req.execute('SET X using_config Y WHERE X eid %(x)s, Y name %(confname)s',
-                    {'x': ex.eid, 'confname': confname})
         for name, status in check_defs:
             cr = req.create_entity('CheckResult', name=unicode(name), status=unicode(status))
-            req.execute('SET X during_execution Y WHERE X eid %(x)s, Y is TestExecution',
-                        {'x': cr.eid})
+            req.execute('SET X during_execution Y WHERE X eid %(x)s, Y eid %(e)s',
+                        {'x': cr.eid, 'e': ex.eid})
         if setend:
-            req.execute('SET X endtime %(et)s WHERE X eid %(x)s',
-                        {'et': datetime.now(), 'x': ex.eid})
-        return ex
+            req.execute('SET X starttime %(et)s, X endtime %(et)s, X status "success" '
+                        'WHERE X eid %(x)s', {'et': datetime.now(), 'x': ex.eid})
--- a/views/__init__.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/views/__init__.py	Fri Sep 10 14:14:42 2010 +0200
@@ -1,36 +1,31 @@
 '''apycot reports'''
 
+import re
+
 from cubicweb.view import NOINDEX, NOFOLLOW
 from cubicweb.web import uicfg, formwidgets as wdgs
+from cubicweb.web.views import urlpublishing
 from cubicweb.web.views.urlrewrite import rgx, build_rset, SchemaBasedRewriter, \
                                           SimpleReqRewriter
 
-from cubes.apycot.entities import bot_proxy
-
-
-def no_robot_index(self):
-    return [NOINDEX, NOFOLLOW]
+from cubes.narval.proxy import bot_proxy
 
 def anchor_name(data):
     """escapes XML/HTML forbidden characters in attributes and PCDATA"""
     return (data.replace('&', '').replace('<', '').replace('>','')
             .replace('"', '').replace("'", ''))
 
-# ui configuration #############################################################
-
+_afs = uicfg.autoform_section
+_affk = uicfg.autoform_field_kwargs
 
-_afs = uicfg.autoform_section
-_afs.tag_subject_of(('*', 'use_group', '*'), 'main', 'attributes')
-_afs.tag_object_of(('*', 'use_environment', 'ProjectEnvironment'),
-                   'main', 'inlined')
+# ui configuration #############################################################
 
 
 # register generated message id
 _('Available checkers:')
-_('Available preprocessors:')
 _('Available options:')
 
-def build_help_func(attr, apycot_type, etype='TestConfigGroup'):
+def build_help_func(attr, apycot_type, etype='TestConfig'):
     def help_func(form, attr=attr, apycot_type=apycot_type, etype=etype):
         req = form._cw
         help = req.vreg.schema.eschema(etype).rdef(attr).description
@@ -52,26 +47,24 @@
         return help
     return help_func
 
-_affk = uicfg.autoform_field_kwargs
-helpfunc = build_help_func('check_preprocessors', 'preprocessors', 'ProjectEnvironment')
-_affk.tag_attribute(('ProjectEnvironment', 'check_preprocessors'),
-                    {'help': helpfunc})
-for attr, apycot_type in (('check_config', 'options'),):
-    for etype in ('TestConfigGroup', 'ProjectEnvironment'):
-        helpfunc = build_help_func(attr, apycot_type, etype=etype)
-        _affk.tag_attribute((etype, attr), {'help': helpfunc})
-for attr, apycot_type in (('checks', 'checkers'),):
-    helpfunc = build_help_func(attr, apycot_type, 'TestConfigGroup')
-    _affk.tag_attribute(('TestConfigGroup', attr), {
-        'help': helpfunc, 'widget': wdgs.TextInput({'size': 100})})
-    _affk.tag_attribute(('TestConfig', attr), {
-        'help': helpfunc, 'widget': wdgs.TextInput({'size': 100})})
+for etype in ('TestConfig', 'ProjectEnvironment'):
+    _afs.tag_subject_of((etype, 'refinement_of', '*'), 'main', 'attributes')
+    helpfunc = build_help_func('check_config', 'options', etype=etype)
+    _affk.tag_attribute((etype, 'check_config'), {'help': helpfunc})
+
+
+_affk.tag_attribute(('ProjectEnvironment', 'vcs_path'),
+                    {'widget': wdgs.TextInput})
+
 _affk.tag_attribute(('TestConfig', 'start_mode'), {'sort': False})
+_affk.tag_attribute(('TestConfig', 'start_rev_deps'),
+                    {'allow_none': True,
+                     'choices': [(_('inherited'), ''), ('yes', '1'), ('no', '0')]})
+_affk.tag_attribute(('TestConfig', 'subpath'),
+                    {'widget': wdgs.TextInput})
+_afs.tag_attribute(('TestConfig', 'computed_start_mode'), 'main', 'hidden')
 
-_affk.tag_attribute(('ProjectEnvironment', 'vcs_repository'), {'widget': wdgs.TextInput})
-_affk.tag_attribute(('ProjectEnvironment', 'vcs_path'), {'widget': wdgs.TextInput})
-_affk.tag_attribute(('TestConfig', 'subpath'), {'widget': wdgs.TextInput})
-_affk.tag_attribute(('TestConfig', 'subpath'), {'widget': wdgs.TextInput})
+_afs.tag_subject_of(('TestConfig', 'use_recipe', '*'), 'main', 'attributes')
 
 
 _abba = uicfg.actionbox_appearsin_addmenu
@@ -80,6 +73,8 @@
 _abba.tag_object_of(('*', 'for_check', '*'), False)
 _abba.tag_object_of(('*', 'during_execution', '*'), False)
 _abba.tag_object_of(('*', 'using_config', '*'), False)
+_abba.tag_object_of(('*', 'using_environment', '*'), False)
+_abba.tag_object_of(('*', 'on_environment', '*'), False)
 
 
 # urls configuration ###########################################################
@@ -87,9 +82,15 @@
 class SimpleReqRewriter(SimpleReqRewriter):
     rules = [
         (rgx('/apycotdoc'), dict(vid='apycotdoc')),
-        (rgx('/apycotbot'), dict(vid='botstatus')),
         ]
 
+# def list_test_executions(inputurl, uri, req, schema):
+#     rql = req.vreg['etypes'].etype_class('TestExecution').fetch_rql(req.user)
+#     rset = req.execute(rql)
+#     if len(rset) > 1:
+#         req.form['vid'] = 'apycot.te.summarytable'
+#     return None, rset
+
 class RestPathRewriter(SchemaBasedRewriter):
     rules = [
         (rgx('/projectenvironment/([^/]+)/([^/]+)'),
@@ -100,4 +101,19 @@
          build_rset(rql='TestExecution Y WHERE X use_environment P, P name %(pe)s,'
                         ' X name %(tc)s, Y using_config X, Y eid %(te)s',
                     rgxgroups=[('pe', 1), ('tc', 2), ('te', 3)])),
+
+        # (rgx('/testexecution/?', re.I), list_test_executions),
         ]
+
+# XXX necessary since it takes precedence other the /testexecution/' rule above
+class RestPathEvaluator(urlpublishing.RestPathEvaluator):
+
+    def cls_rset(self, req, cls):
+        rset = super(RestPathEvaluator, self).cls_rset(req, cls)
+        if cls.__regid__ == 'TestExecution' and len(rset) > 1:
+            req.form['vid'] = 'apycot.te.summarytable'
+        return rset
+
+def registration_callback(vreg):
+    vreg.register_all(globals().values(), __name__, (RestPathEvaluator,))
+    vreg.register_and_replace(RestPathEvaluator, urlpublishing.RestPathEvaluator)
--- a/views/bot.py	Wed Jul 28 12:10:03 2010 +0200
+++ b/views/bot.py	Fri Sep 10 14:14:42 2010 +0200
@@ -7,14 +7,11 @@
 """
 __docformat__ = "restructuredtext en"
 
-from logilab.common.tasksqueue import REVERSE_PRIORITY
 from logilab.mtconverter import xml_escape
 
-from cubicweb import UnknownEid, tags
-from cubicweb.selectors import match_kwargs