Python testtools.TestResult() Examples

The following are 8 code examples of testtools.TestResult(), all drawn from the flocker project. Each example lists its original source file so you can view it in context. You may also want to check out the other available functions and classes of the testtools module.
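Before the project-specific examples, here is a minimal, self-contained sketch of the basic testtools.TestResult() workflow: construct a result, pass it to TestCase.run(), and inspect the outcome. The Example class below is hypothetical, written purely for illustration.

import testtools

class Example(testtools.TestCase):
    def test_passes(self):
        self.assertEqual(2 + 2, 4)

result = testtools.TestResult()
Example('test_passes').run(result)
print(result.testsRun)         # 1
print(result.wasSuccessful())  # True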
Example #1
Source File: _flaky.py    From flocker with Apache License 2.0
def _run_prepared_result(self, result):
        """
        Run the test with a result that conforms to testtools' extended
        ``TestResult`` interface.

        This overrides a method in base ``RunTest`` which is intended to be
        overridden.
        """
        flaky = _get_flaky_annotation(self._case)
        if flaky is not None:
            return self._run_flaky_test(self._case, result, flaky)

        # No flaky attributes? Then run as normal.
        return self._run_test(self._case, result) 
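Example #1 hooks into _run_prepared_result, the method testtools.RunTest calls once the result has been decorated to the extended interface. Below is a hedged sketch of the same override pattern using only public testtools names plus that internal hook; LoggingRunTest is invented for illustration and is not part of flocker or testtools.

import testtools
from testtools import RunTest

class LoggingRunTest(RunTest):
    # _run_prepared_result receives the already-decorated extended
    # result object; overriding it lets us wrap the whole test run.
    def _run_prepared_result(self, result):
        print('running %s' % self.case.id())
        return super(LoggingRunTest, self)._run_prepared_result(result)

class Example(testtools.TestCase):
    run_tests_with = LoggingRunTest

    def test_ok(self):
        pass

Example('test_ok').run(testtools.TestResult())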
Example #2
Source File: _flaky.py    From flocker with Apache License 2.0
def _run_test(self, case, result):
        """
        Run ``case`` with the ``RunTest`` we are wrapping.

        :param testtools.TestCase case: The test to run.
        :param testtools.TestResult result: The test result to report to.
            Must conform to the testtools extended test result interface.
        :return: The modified ``result``.
        """
        run_test = self._run_test_factory(case, *self._args, **self._kwargs)
        return run_test._run_prepared_result(result) 
Example #3
Source File: _flaky.py    From flocker with Apache License 2.0
def _attempt_test(self, case):
        """
        Run ``case`` with a temporary result.

        :param testtools.TestCase case: The test to run.

        :return: a tuple of ``(successful, result, details)``, where
            ``successful`` is a boolean indicating whether the test was
            successful, ``result`` is a _ResultType indicating what the test
            result was, and ``details`` is a dictionary of testtools details.
        """
        tmp_result = testtools.TestResult()
        # XXX: Still using internal API of testtools despite improvements in
        # #165. Will need to do follow-up work on testtools to ensure that
        # RunTest.run(case); RunTest.run(case) is supported.
        case._reset()
        self._run_test(case, tmp_result)
        result_type = _get_result_type(tmp_result)
        details = pmap(case.getDetails())
        if result_type == _ResultType.skip:
            # XXX: Work around a testtools bug where it reports stack traces
            # for skips that aren't passed through its supported
            # SkipException: https://bugs.launchpad.net/testtools/+bug/1518100
            [reason] = list(tmp_result.skip_reasons.keys())
            details = details.discard('traceback').set(
                'reason', text_content(reason))
        return (tmp_result.wasSuccessful(), result_type, details) 
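The "temporary result" pattern in Example #3 can be reproduced with public testtools API alone: run a case against a throwaway TestResult and read back its outcome and details. A minimal sketch, where the Probe class is made up for illustration:

import testtools
from testtools.content import text_content

class Probe(testtools.TestCase):
    def test_noisy(self):
        # Attach a detail, then fail so the result has something to record.
        self.addDetail('note', text_content('captured via the tmp result'))
        self.fail('boom')

tmp_result = testtools.TestResult()
Probe('test_noisy').run(tmp_result)
print(tmp_result.wasSuccessful())  # False
print(len(tmp_result.failures))    # 1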
Example #4
Source File: _flaky.py    From flocker with Apache License 2.0
def _get_result_type(result):
    """
    Get the _ResultType for ``result``.

    :param testtools.TestResult result: A TestResult that has had exactly
        one test run on it.
    :raise ValueError: If ``result`` has run more than one test, or has more
        than one kind of result.
    :return: A _ResultType for that result.
    """
    if result.testsRun != 1:
        raise ValueError('%r has run %d tests, 1 expected' % (
            result, result.testsRun))

    total = sum(map(len, [
        result.errors, result.failures, result.unexpectedSuccesses,
        result.expectedFailures, result.skip_reasons]))
    if total > 1:
        raise ValueError(
            '%r has more than one kind of result: %r found' % (result, total))

    if len(result.errors) > 0:
        return _ResultType.error
    elif len(result.failures) > 0:
        return _ResultType.failure
    elif len(result.unexpectedSuccesses) > 0:
        return _ResultType.unexpected_success
    elif len(result.expectedFailures) > 0:
        return _ResultType.expected_failure
    elif len(result.skip_reasons) > 0:
        return _ResultType.skip
    else:
        return _ResultType.success 
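_get_result_type works because a fresh TestResult exposes one collection per kind of outcome. For instance, a single skipped test shows up only in skip_reasons (a mapping of reason to tests) and still counts as successful. A small sketch with a hypothetical test class:

import testtools

class Skippy(testtools.TestCase):
    def test_skipped(self):
        self.skipTest('not relevant here')

result = testtools.TestResult()
Skippy('test_skipped').run(result)
assert result.testsRun == 1
print(list(result.skip_reasons))  # ['not relevant here']
print(result.wasSuccessful())     # True: a skip is not a failure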
Example #5
Source File: test_base.py    From flocker with Apache License 2.0
def test_attaches_twisted_log(self, base_test_case):
        """
        Flocker base test cases attach the Twisted log as a detail.
        """
        # XXX: If debugging is enabled (either by setting this to True or by
        # removing this line and running --debug-stacktraces), then the log
        # fixtures in this test are empty. However, if we run a failing test
        # manually, the logs appear in the details. Not sure what's going on,
        # so disabling debugging for now.
        self.useFixture(DebugTwisted(False))

        class SomeTest(base_test_case):
            def test_something(self):
                from twisted.python import log
                log.msg('foo')

        test = SomeTest('test_something')
        result = TestResult()
        test.run(result)
        self.expectThat(result, has_results(tests_run=Equals(1)))
        self.assertThat(
            test.getDetails(),
            ContainsDict({
                'twisted-log': match_text_content(MatchesRegex(
                    r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}[+-]\d{4} \[-\] foo'
                )),
            })) 
Example #6
Source File: test_base.py    From flocker with Apache License 2.0
def test_separate_eliot_log(self, base_test_case):
        """
        Flocker base test cases attach the eliot log as a detail separate from
        the Twisted log.
        """
        # XXX: If debugging is enabled (either by setting this to True or by
        # removing this line and running --debug-stacktraces), then the log
        # fixtures in this test are empty. However, if we run a failing test
        # manually, the logs appear in the details. Not sure what's going on,
        # so disabling debugging for now.
        self.useFixture(DebugTwisted(False))
        message_type = MessageType(u'foo', fields(name=str), u'test message')

        class SomeTest(base_test_case):
            def test_something(self):
                from twisted.python import log
                log.msg('foo')
                message_type(name='qux').write()

        test = SomeTest('test_something')
        result = TestResult()
        test.run(result)
        self.expectThat(result, has_results(tests_run=Equals(1)))
        self.assertThat(
            test.getDetails(),
            MatchesDict({
                'twisted-log': match_text_content(MatchesRegex(
                    r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}[+-]\d{4} \[-\] foo'
                )),
                _SplitEliotLogs._ELIOT_LOG_DETAIL_NAME: match_text_content(
                    Contains("  message_type: 'foo'\n"
                             "  name: 'qux'\n")
                ),
            })) 
Example #7
Source File: test_base.py    From flocker with Apache License 2.0
def test_logs_after_timeout(self):
        """
        We include logs for tests, even if they time out.
        """
        message_type = MessageType(u'foo', fields(name=str), u'test message')

        class SomeTest(AsyncTestCase):

            # Set the timeout super low, because we're not doing anything.
            run_tests_with = async_runner(timeout=timedelta(seconds=0.00005))

            def test_something(self):
                from twisted.python import log
                log.msg('foo')
                message_type(name='qux').write()
                # Return a Deferred that never fires to guarantee a timeout.
                return Deferred()

        test = SomeTest('test_something')
        result = TestResult()
        test.run(result)
        self.assertThat(
            result,
            has_results(
                tests_run=Equals(1),
                errors=MatchesListwise([MatchesListwise([
                    Equals(test),
                    MatchesAll(
                        Contains('[-] foo\n'),
                        Contains("message_type: 'foo'"),
                    ),
                ])]),
            )
        ) 
Example #8
Source File: _flaky.py    From flocker with Apache License 2.0
def _run_flaky_test(self, case, result, flaky):
        """
        Run a test that has been decorated with the `@flaky` decorator.

        :param TestCase case: A ``testtools.TestCase`` to run.
        :param TestResult result: A ``TestResult`` object that conforms to the
            testtools extended result interface.
        :param _FlakyAnnotation flaky: A description of the conditions of
            flakiness.

        :return: A ``TestResult`` with the result of running the flaky test.
        """
        result.startTest(case)
        successes = 0
        results = []

        # Optimization to stop running early if there's no way that we can
        # reach the minimum number of successes.
        max_fails = flaky.max_runs - flaky.min_passes
        while (successes < flaky.min_passes and
               len(results) - successes <= max_fails):
            was_successful, result_type, details = self._attempt_test(case)
            if was_successful:
                successes += 1
            results.append((result_type, details))
        successful = successes >= flaky.min_passes

        flaky_data = flaky.to_dict()
        flaky_data.update({'runs': len(results), 'passes': successes})
        flaky_details = {
            'flaky': text_content(pformat(flaky_data)),
        }
        combined_details = _combine_details(
            [flaky_details] + list(r[1] for r in results))

        if successful:
            skip_reported = False
            for result_type, details in results:
                if result_type == _ResultType.skip:
                    result.addSkip(case, details=details)
                    skip_reported = True

            if not skip_reported:
                Message.new(
                    message_type=u"flocker:test:flaky",
                    id=case.id(),
                    successes=successes,
                    passes=len(results),
                    min_passes=flaky.min_passes,
                    max_runs=flaky.max_runs,
                ).write()
                result.addSuccess(case, details=combined_details)
        else:
            # XXX: How are we going to report on tests that sometimes fail,
            # sometimes error, sometimes skip? Currently we just error.
            result.addError(case, details=combined_details)
        result.stopTest(case)
        return result
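The early-exit condition in the loop above is pure arithmetic: once the number of failed runs exceeds max_runs - min_passes, no remaining run can bring the test up to min_passes. A simplified, flocker-free sketch of just that bookkeeping; run_flaky and its arguments are invented for illustration, not part of flocker's API.

import random

def run_flaky(attempt, min_passes, max_runs):
    # attempt is any zero-argument callable returning True on success.
    successes = runs = 0
    max_fails = max_runs - min_passes
    # Stop once we have enough passes, or once too many runs have
    # failed for min_passes to still be reachable.
    while successes < min_passes and runs - successes <= max_fails:
        if attempt():
            successes += 1
        runs += 1
    return successes >= min_passes

# e.g. a coin-flip "test" that must pass at least twice in five runs:
print(run_flaky(lambda: random.random() < 0.5, min_passes=2, max_runs=5))

Note that with all failures this stops after max_fails + 1 runs rather than using the full budget, mirroring the optimization in the real loop.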