Python doctest.ELLIPSIS Examples

The following are 30 code examples of doctest.ELLIPSIS, the doctest option flag that lets an ellipsis marker (...) in an example's expected output match any substring of the actual output. Each example is taken from the project and source file named in its heading. You may also want to check out the other functions and classes available in the doctest module.
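Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the flag in action: ELLIPSIS can be enabled globally through the optionflags argument of doctest.testmod, or per example with a # doctest: +ELLIPSIS directive, and the three dots in the expected output then match any substring of the actual output.

import doctest

def greet():
    """
    >>> greet()  # doctest: +ELLIPSIS
    Hello, ...!
    """
    print("Hello, world!")

if __name__ == "__main__":
    # Passing the flag via optionflags enables ELLIPSIS for every example
    # in the module; the directive above enables it for one example only.
    (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
    print("failed: {}, attempted: {}".format(failure_count, test_count))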
Example #1
Source File: udf.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.sql.udf
    globs = pyspark.sql.udf.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.udf tests")\
        .getOrCreate()
    globs['spark'] = spark
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.udf, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Example #2
Source File: tests.py    From google-searchconsole with MIT License
def load_tests(loader, tests, ignore):
    """Many docstrings contain doctests. Instead of using a separate doctest
    runner, we use doctest's unittest API."""
    # doctest, searchconsole and webproperty_uri are imported/defined at
    # module level in the original tests.py.
    account = searchconsole.authenticate(
        client_config='auth/client_secrets.json',
        credentials='auth/credentials.dat'
    )

    globs = {
        'account': account,
        'webproperty': account[webproperty_uri],
        'www_webproperty_com': webproperty_uri,
        'query': account[webproperty_uri].query
    }

    kwargs = {
        'globs': globs,
        'optionflags': doctest.ELLIPSIS
    }

    tests.addTests(doctest.DocTestSuite(searchconsole.auth, **kwargs))
    tests.addTests(doctest.DocTestSuite(searchconsole.account, **kwargs))
    tests.addTests(doctest.DocTestSuite(searchconsole.query, **kwargs))

    return tests 
Example #3
Source File: session.py    From LearningApacheSpark with MIT License
def _test():
    import os
    import sys
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session  # SparkSession below is defined in this module

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1) 
Example #4
Source File: test_integration.py    From cr8 with MIT License
def load_tests(loader, tests, ignore):
    # os, doctest, functools and subprocess are imported at module level in
    # the original test_integration.py, which also provides node, setup,
    # teardown and Parser.
    env = os.environ.copy()
    env['CR8_NO_TQDM'] = 'True'
    node.start()
    assert node.http_host, "http_url must be available"
    tests.addTests(doctest.DocFileSuite(
        os.path.join('..', 'README.rst'),
        globs={
            'sh': functools.partial(
                subprocess.run,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                timeout=60,
                shell=True,
                env=env
            )
        },
        optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS,
        setUp=setup,
        tearDown=teardown,
        parser=Parser()
    ))
    return tests 
Example #5
Source File: column.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    from pyspark.sql import SparkSession
    from pyspark.sql.types import IntegerType, StringType, StructField, StructType
    import pyspark.sql.column
    globs = pyspark.sql.column.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.column tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['spark'] = spark
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))

    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.column, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Example #6
Source File: ipdoctest.py    From Computable with MIT License
def makeTest(self, obj, parent):
        """Look for doctests in the given object, which will be a
        function, method or class.
        """
        # print('Plugin analyzing:', obj, parent)  # dbg
        # Always use the whitespace and ellipsis options.
        optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

        doctests = self.finder.find(obj, module=getmodule(parent))
        if doctests:
            for test in doctests:
                if len(test.examples) == 0:
                    continue

                yield DocTestCase(test, obj=obj,
                                  optionflags=optionflags,
                                  checker=self.checker) 
Example #7
Source File: test_matchers.py    From pth-toolkit with BSD 2-Clause "Simplified" License
def test_describe_non_ascii_bytes(self):
        """Even with bytestrings, the mismatch should be coercible to unicode

        DocTestMatches is intended for text, but the Python 2 str type also
        permits arbitrary binary inputs. This is a slightly bogus thing to do,
        and under Python 3 using bytes objects will reasonably raise an error.
        """
        header = _b("\x89PNG\r\n\x1a\n...")
        if str_is_unicode:
            self.assertRaises(TypeError,
                DocTestMatches, header, doctest.ELLIPSIS)
            return
        matcher = DocTestMatches(header, doctest.ELLIPSIS)
        mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
        # Must be treatable as unicode text, the exact output matters less
        self.assertTrue(unicode(mismatch.describe())) 
Example #8
Source File: test_testresult.py    From pth-toolkit with BSD 2-Clause "Simplified" License
def test_traceback_formatting_without_stack_hidden(self):
        # During the testtools test run, we show our levels of the stack,
        # because we want to be able to use our test suite to debug our own
        # code.
        result = self.makeResult()
        test = make_erroring_test()
        test.run(result)
        self.assertThat(
            result.errors[0][1],
            DocTestMatches(
                'Traceback (most recent call last):\n'
                '  File "...testtools...runtest.py", line ..., in _run_user\n'
                '    return fn(*args, **kwargs)\n'
                '  File "...testtools...testcase.py", line ..., in _run_test_method\n'
                '    return self._get_test_method()()\n'
                '  File "...testtools...tests...test_testresult.py", line ..., in error\n'
                '    1/0\n'
                'ZeroDivisionError: ...\n',
                doctest.ELLIPSIS | doctest.REPORT_UDIFF)) 
Example #9
Source File: util.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    from pyspark.sql import SparkSession
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("mllib.util tests")\
        .getOrCreate()
    globs['spark'] = spark
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Example #10
Source File: fpm.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.mllib.fpm
    globs = pyspark.mllib.fpm.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.fpm tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    import tempfile

    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1) 
Example #11
Source File: functions.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.functions
    globs = pyspark.sql.functions.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.functions tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Example #12
Source File: _statistics.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    import numpy
    from pyspark.sql import SparkSession
    try:
        # NumPy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    globs = globals().copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.stat.statistics tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Example #13
Source File: __init__.py    From fungrim with MIT License
def test():
    import doctest
    # expr, algebraic, brain and TestBrain are module-level names in the
    # original fungrim __init__.py.

    print("----------------------------------------------------------")
    print("expr")
    print("----------------------------------------------------------")
    doctest.testmod(expr, verbose=True, raise_on_error=False, optionflags=doctest.ELLIPSIS)
    expr.TestExpr().run()

    print("----------------------------------------------------------")
    print("algebraic")
    print("----------------------------------------------------------")
    doctest.testmod(algebraic, verbose=True, raise_on_error=True, optionflags=doctest.ELLIPSIS)
    algebraic.TestAlgebraic().run()

    print("----------------------------------------------------------")
    print("brain")
    print("----------------------------------------------------------")
    doctest.testmod(brain, verbose=True, raise_on_error=True, optionflags=doctest.ELLIPSIS)
    TestBrain().run() 
Example #14
Source File: session.py    From tidb-docker-compose with Apache License 2.0
def _test():
    import os
    import sys
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session  # SparkSession below is defined in this module

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1)
Example #15
Source File: session.py    From tidb-docker-compose with Apache License 2.0
def _test():
    import os
    import sys
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session  # SparkSession below is defined in this module

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1) 
Example #16
Source File: test_doctests.py    From anyMesh-Python with MIT License
def load_tests(loader, tests, ignore):
    # doctest and urwid are imported at module level in the original
    # test_doctests.py.
    module_doctests = [
        urwid.widget,
        urwid.wimp,
        urwid.decoration,
        urwid.display_common,
        urwid.main_loop,
        urwid.monitored_list,
        urwid.raw_display,
        'urwid.split_repr', # override function with same name
        urwid.util,
        urwid.signals,
        ]
    for m in module_doctests:
        tests.addTests(doctest.DocTestSuite(m,
            optionflags=doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL))
    return tests 
Example #17
Source File: main.py    From pg_simple with BSD 3-Clause "New" or "Revised" License
def test_basic_functions(self):
        import code
        import doctest
        import sys

        db = pg_simple.PgSimple(self.pool)
        if sys.argv.count('--interact'):
            db.log = sys.stdout
            code.interact(local=locals())
        else:
            try:
                # Setup tables
                self._drop_tables(db)
                self._create_tables(db, fill=True)
                # Run tests
                doctest.testmod(optionflags=doctest.ELLIPSIS)
            finally:
                # Drop tables
                self._drop_tables(db)
        self.assertEqual(True, True) 
Example #18
Source File: test_testresult.py    From pth-toolkit with BSD 2-Clause "Simplified" License
def test_stopTestRun_successful(self):
        self.result.startTestRun()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\nOK\n", doctest.ELLIPSIS)) 
Example #19
Source File: test_testresult.py    From pth-toolkit with BSD 2-Clause "Simplified" License
def test_stopTestRun_count_zero(self):
        self.result.startTestRun()
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS)) 
Example #20
Source File: test_testresult.py    From pth-toolkit with BSD 2-Clause "Simplified" License
def test_stopTestRun_not_successful_error(self):
        test = make_erroring_test()
        self.result.startTestRun()
        test.run(self.result)
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS)) 
Example #21
Source File: test_testresult.py    From pth-toolkit with BSD 2-Clause "Simplified" License
def test_stopTestRun_current_time(self):
        test = make_test()
        now = datetime.datetime.now(utc)
        self.result.time(now)
        self.result.startTestRun()
        self.result.startTest(test)
        now = now + datetime.timedelta(0, 0, 0, 1)
        self.result.time(now)
        self.result.stopTest(test)
        self.reset_output()
        self.result.stopTestRun()
        self.assertThat(self.getvalue(),
            DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS)) 
Example #22
Source File: test_doctest.py    From ironpython3 with Apache License 2.0
def test_unicode(): """
Check doctest with a non-ascii filename:

    >>> doc = '''
    ... >>> raise Exception('clé')
    ... '''
    ...
    >>> parser = doctest.DocTestParser()
    >>> test = parser.get_doctest(doc, {}, "foo-bär@baz", "foo-bär@baz.py", 0)
    >>> test
    <DocTest foo-bär@baz from foo-bär@baz.py:0 (1 example)>
    >>> runner = doctest.DocTestRunner(verbose=False)
    >>> runner.run(test) # doctest: +ELLIPSIS
    **********************************************************************
    File "foo-bär@baz.py", line 2, in foo-bär@baz
    Failed example:
        raise Exception('clé')
    Exception raised:
        Traceback (most recent call last):
          File ...
            compileflags, 1), test.globs)
          File "<doctest foo-bär@baz[0]>", line 1, in <module>
            raise Exception('clé')
        Exception: clé
    TestResults(failed=1, attempted=1)
    """ 
Example #23
Source File: __init__.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    import numpy
    try:
        # NumPy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    (failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
    if failure_count:
        sys.exit(-1) 
Example #24
Source File: types.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    from pyspark.context import SparkContext
    from pyspark.sql import SparkSession
    globs = globals()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession.builder.getOrCreate()
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1) 
Example #25
Source File: readwriter.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import os
    import sys
    import tempfile
    import py4j
    from pyspark.context import SparkContext
    from pyspark.sql import SparkSession, Row
    import pyspark.sql.readwriter

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.readwriter.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    try:
        spark = SparkSession.builder.getOrCreate()
    except py4j.protocol.Py4JError:
        spark = SparkSession(sc)

    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = spark.read.parquet('python/test_support/sql/parquet_partitioned')
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.readwriter, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    sc.stop()
    if failure_count:
        sys.exit(-1) 
Example #26
Source File: group.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    from pyspark.sql import Row, SparkSession
    from pyspark.sql.types import IntegerType, StringType, StructField, StructType
    import pyspark.sql.group
    globs = pyspark.sql.group.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.group tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
                                   Row(name='Bob', age=5, height=85)]).toDF()
    globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
                                   Row(course="Java",   year=2012, earnings=20000),
                                   Row(course="dotNET", year=2012, earnings=5000),
                                   Row(course="dotNET", year=2013, earnings=48000),
                                   Row(course="Java",   year=2013, earnings=30000)]).toDF()
    globs['df5'] = sc.parallelize([
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=10000)),
        Row(training="junior", sales=Row(course="Java",   year=2012, earnings=20000)),
        Row(training="expert", sales=Row(course="dotNET", year=2012, earnings=5000)),
        Row(training="junior", sales=Row(course="dotNET", year=2013, earnings=48000)),
        Row(training="expert", sales=Row(course="Java",   year=2013, earnings=30000))]).toDF()

    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.group, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        sys.exit(-1) 
Example #27
Source File: context.py    From LearningApacheSpark with MIT License
def _test():
    import os
    import sys
    import doctest
    import tempfile
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    import pyspark.sql.context

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.context.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")]
    )
    globs['df'] = rdd.toDF()
    jsonStrings = [
        '{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
        '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
        '"field6":[{"field7": "row2"}]}',
        '{"field1" : null, "field2": "row3", '
        '"field3":{"field4":33, "field5": []}}'
    ]
    globs['jsonStrings'] = jsonStrings
    globs['json'] = sc.parallelize(jsonStrings)
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.context, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1) 
Example #28
Source File: dataframe.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import sys
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext, SparkSession
    from pyspark.sql.types import IntegerType, StringType, StructField, StructType
    import pyspark.sql.dataframe
    from pyspark.sql.functions import from_unixtime
    globs = pyspark.sql.dataframe.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['spark'] = SparkSession(sc)
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
                                   Row(name='Bob', age=5)]).toDF()
    globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
                                   Row(name='Bob', age=5, height=None),
                                   Row(name='Tom', age=None, height=None),
                                   Row(name=None, age=None, height=None)]).toDF()
    globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),
                                   Row(name='Bob', spy=None, age=5),
                                   Row(name='Mallory', spy=True, age=None)]).toDF()
    globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
                                   Row(name='Bob', time=1479442946)]).toDF()

    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.dataframe, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1) 
Example #29
Source File: streaming.py    From LearningApacheSpark with MIT License
def _test():
    import doctest
    import os
    import sys
    import tempfile
    import py4j
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SparkSession, SQLContext
    from pyspark.sql.types import StringType, StructField, StructType
    import pyspark.sql.streaming

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.streaming.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')  # the fallback below needs sc
    try:
        spark = SparkSession.builder.getOrCreate()
    except py4j.protocol.Py4JError:
        spark = SparkSession(sc)

    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['spark'] = spark
    globs['sqlContext'] = SQLContext.getOrCreate(spark.sparkContext)
    globs['sdf'] = \
        spark.readStream.format('text').load('python/test_support/sql/streaming')
    globs['sdf_schema'] = StructType([StructField("data", StringType(), False)])
    globs['df'] = \
        globs['spark'].readStream.format('text').load('python/test_support/sql/streaming')

    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.streaming, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['spark'].stop()

    if failure_count:
        sys.exit(-1) 
Example #30
Source File: json_test.py    From cassandra-dtest with Apache License 2.0
def run_func_docstring(tester, test_func, globs=None, verbose=False, compileflags=None, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE):
    """
    Similar to doctest.run_docstring_examples, but takes a single function/bound method,
    extracts it's singular docstring (no looking for subobjects with tests),
    runs it, and most importantly raises an exception if the test doesn't pass.

    tester should be an instance of dtest.Tester
    test_func should be a function/bound method the docstring to be tested
    """
    name = test_func.__name__

    if globs is None:
        globs = build_doc_context(tester, name)

    # dumb function that remembers values that it is called with
    # the DocTestRunner.run function called below accepts a callable for logging
    # and this is a hacky but easy way to capture the nicely formatted value for reporting
    def test_output_capturer(content):
        if not hasattr(test_output_capturer, 'content'):
            test_output_capturer.content = ''

        test_output_capturer.content += content

    test = doctest.DocTestParser().get_doctest(inspect.getdoc(test_func), globs, name, None, None)
    runner = doctest.DocTestRunner(verbose=verbose, optionflags=optionflags)
    runner.run(test, out=test_output_capturer, compileflags=compileflags)

    failed, attempted = runner.summarize()

    if failed > 0:
        raise RuntimeError("Doctest failed! Captured output:\n{}".format(test_output_capturer.content))

    if failed + attempted == 0:
        raise RuntimeError("No tests were run!")