whoami7 - Manager
:
/
home
/
creaupfw
/
public_html
/
wp-includes
/
assets
/
Upload File:
files >> //home/creaupfw/public_html/wp-includes/assets/unittest.zip
PK �q�Zd��_ _ util.pynu �[��� """Various utility functions.""" from collections import namedtuple, Counter from os.path import commonprefix __unittest = True _MAX_LENGTH = 80 _PLACEHOLDER_LEN = 12 _MIN_BEGIN_LEN = 5 _MIN_END_LEN = 5 _MIN_COMMON_LEN = 5 _MIN_DIFF_LEN = _MAX_LENGTH - \ (_MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + _PLACEHOLDER_LEN + _MIN_END_LEN) assert _MIN_DIFF_LEN >= 0 def _shorten(s, prefixlen, suffixlen): skip = len(s) - prefixlen - suffixlen if skip > _PLACEHOLDER_LEN: s = '%s[%d chars]%s' % (s[:prefixlen], skip, s[len(s) - suffixlen:]) return s def _common_shorten_repr(*args): args = tuple(map(safe_repr, args)) maxlen = max(map(len, args)) if maxlen <= _MAX_LENGTH: return args prefix = commonprefix(args) prefixlen = len(prefix) common_len = _MAX_LENGTH - \ (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN) if common_len > _MIN_COMMON_LEN: assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \ (maxlen - prefixlen) < _MAX_LENGTH prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len) return tuple(prefix + s[prefixlen:] for s in args) prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN) return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN) for s in args) def safe_repr(obj, short=False): try: result = repr(obj) except Exception: result = object.__repr__(obj) if not short or len(result) < _MAX_LENGTH: return result return result[:_MAX_LENGTH] + ' [truncated]...' def strclass(cls): return "%s.%s" % (cls.__module__, cls.__qualname__) def sorted_list_difference(expected, actual): """Finds elements in only one or the other of two, sorted input lists. Returns a two-element tuple of lists. The first list contains those elements in the "expected" list but not in the "actual" list, and the second contains those elements in the "actual" list but not in the "expected" list. Duplicate elements in either input list are ignored. """ i = j = 0 missing = [] unexpected = [] while True: try: e = expected[i] a = actual[j] if e < a: missing.append(e) i += 1 while expected[i] == e: i += 1 elif e > a: unexpected.append(a) j += 1 while actual[j] == a: j += 1 else: i += 1 try: while expected[i] == e: i += 1 finally: j += 1 while actual[j] == a: j += 1 except IndexError: missing.extend(expected[i:]) unexpected.extend(actual[j:]) break return missing, unexpected def unorderable_list_difference(expected, actual): """Same behavior as sorted_list_difference but for lists of unorderable items (like dicts). As it does a linear search per item (remove) it has O(n*n) performance.""" missing = [] while expected: item = expected.pop() try: actual.remove(item) except ValueError: missing.append(item) # anything left in actual is unexpected return missing, actual def three_way_cmp(x, y): """Return -1 if x < y, 0 if x == y and 1 if x > y""" return (x > y) - (x < y) _Mismatch = namedtuple('Mismatch', 'actual expected value') def _count_diff_all_purpose(actual, expected): 'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ' # elements need not be hashable s, t = list(actual), list(expected) m, n = len(s), len(t) NULL = object() result = [] for i, elem in enumerate(s): if elem is NULL: continue cnt_s = cnt_t = 0 for j in range(i, m): if s[j] == elem: cnt_s += 1 s[j] = NULL for j, other_elem in enumerate(t): if other_elem == elem: cnt_t += 1 t[j] = NULL if cnt_s != cnt_t: diff = _Mismatch(cnt_s, cnt_t, elem) result.append(diff) for i, elem in enumerate(t): if elem is NULL: continue cnt_t = 0 for j in range(i, n): if t[j] == elem: cnt_t += 1 t[j] = NULL diff = _Mismatch(0, cnt_t, elem) result.append(diff) return result def _count_diff_hashable(actual, expected): 'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ' # elements must be hashable s, t = Counter(actual), Counter(expected) result = [] for elem, cnt_s in s.items(): cnt_t = t.get(elem, 0) if cnt_s != cnt_t: diff = _Mismatch(cnt_s, cnt_t, elem) result.append(diff) for elem, cnt_t in t.items(): if elem not in s: diff = _Mismatch(0, cnt_t, elem) result.append(diff) return result PK �q�Z��� � __init__.pynu �[��� """ Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's Smalltalk testing framework (used with permission). This module contains the core framework classes that form the basis of specific test cases and suites (TestCase, TestSuite etc.), and also a text-based utility class for running the tests and reporting the results (TextTestRunner). Simple usage: import unittest class IntegerArithmeticTestCase(unittest.TestCase): def testAdd(self): # test method names begin with 'test' self.assertEqual((1 + 2), 3) self.assertEqual(0 + 1, 1) def testMultiply(self): self.assertEqual((0 * 10), 0) self.assertEqual((5 * 8), 40) if __name__ == '__main__': unittest.main() Further information is available in the bundled documentation, and from http://docs.python.org/library/unittest.html Copyright (c) 1999-2003 Steve Purcell Copyright (c) 2003-2010 Python Software Foundation This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form. IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. """ __all__ = ['TestResult', 'TestCase', 'IsolatedAsyncioTestCase', 'TestSuite', 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main', 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless', 'expectedFailure', 'TextTestResult', 'installHandler', 'registerResult', 'removeResult', 'removeHandler', 'addModuleCleanup'] # Expose obsolete functions for backwards compatibility __all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases']) __unittest = True from .result import TestResult from .async_case import IsolatedAsyncioTestCase from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest, skip, skipIf, skipUnless, expectedFailure) from .suite import BaseTestSuite, TestSuite from .loader import (TestLoader, defaultTestLoader, makeSuite, getTestCaseNames, findTestCases) from .main import TestProgram, main from .runner import TextTestRunner, TextTestResult from .signals import installHandler, registerResult, removeResult, removeHandler # deprecated _TextTestResult = TextTestResult # There are no tests here, so don't try to run anything discovered from # introspecting the symbols (e.g. FunctionTestCase). Instead, all our # tests come from within unittest.test. def load_tests(loader, tests, pattern): import os.path # top level directory cached on loader instance this_dir = os.path.dirname(__file__) return loader.discover(start_dir=this_dir, pattern=pattern) PK �q�Zڗ��c c signals.pynu �[��� import signal import weakref from functools import wraps __unittest = True class _InterruptHandler(object): def __init__(self, default_handler): self.called = False self.original_handler = default_handler if isinstance(default_handler, int): if default_handler == signal.SIG_DFL: # Pretend it's signal.default_int_handler instead. default_handler = signal.default_int_handler elif default_handler == signal.SIG_IGN: # Not quite the same thing as SIG_IGN, but the closest we # can make it: do nothing. def default_handler(unused_signum, unused_frame): pass else: raise TypeError("expected SIGINT signal handler to be " "signal.SIG_IGN, signal.SIG_DFL, or a " "callable object") self.default_handler = default_handler def __call__(self, signum, frame): installed_handler = signal.getsignal(signal.SIGINT) if installed_handler is not self: # if we aren't the installed handler, then delegate immediately # to the default handler self.default_handler(signum, frame) if self.called: self.default_handler(signum, frame) self.called = True for result in _results.keys(): result.stop() _results = weakref.WeakKeyDictionary() def registerResult(result): _results[result] = 1 def removeResult(result): return bool(_results.pop(result, None)) _interrupt_handler = None def installHandler(): global _interrupt_handler if _interrupt_handler is None: default_handler = signal.getsignal(signal.SIGINT) _interrupt_handler = _InterruptHandler(default_handler) signal.signal(signal.SIGINT, _interrupt_handler) def removeHandler(method=None): if method is not None: @wraps(method) def inner(*args, **kwargs): initial = signal.getsignal(signal.SIGINT) removeHandler() try: return method(*args, **kwargs) finally: signal.signal(signal.SIGINT, initial) return inner global _interrupt_handler if _interrupt_handler is not None: signal.signal(signal.SIGINT, _interrupt_handler.original_handler) PK �q�Z�b!%� � __main__.pynu �[��� """Main entry point""" import sys if sys.argv[0].endswith("__main__.py"): import os.path # We change sys.argv[0] to make help message more useful # use executable without path, unquoted # (it's just a hint anyway) # (if you have spaces in your executable you get what you deserve!) executable = os.path.basename(sys.executable) sys.argv[0] = executable + " -m unittest" del os __unittest = True from .main import main main(module=None) PK �q�ZV�q� � async_case.pynu �[��� import asyncio import inspect from .case import TestCase class IsolatedAsyncioTestCase(TestCase): # Names intentionally have a long prefix # to reduce a chance of clashing with user-defined attributes # from inherited test case # # The class doesn't call loop.run_until_complete(self.setUp()) and family # but uses a different approach: # 1. create a long-running task that reads self.setUp() # awaitable from queue along with a future # 2. await the awaitable object passing in and set the result # into the future object # 3. Outer code puts the awaitable and the future object into a queue # with waiting for the future # The trick is necessary because every run_until_complete() call # creates a new task with embedded ContextVar context. # To share contextvars between setUp(), test and tearDown() we need to execute # them inside the same task. # Note: the test case modifies event loop policy if the policy was not instantiated # yet. # asyncio.get_event_loop_policy() creates a default policy on demand but never # returns None # I believe this is not an issue in user level tests but python itself for testing # should reset a policy in every test module # by calling asyncio.set_event_loop_policy(None) in tearDownModule() def __init__(self, methodName='runTest'): super().__init__(methodName) self._asyncioTestLoop = None self._asyncioCallsQueue = None async def asyncSetUp(self): pass async def asyncTearDown(self): pass def addAsyncCleanup(self, func, /, *args, **kwargs): # A trivial trampoline to addCleanup() # the function exists because it has a different semantics # and signature: # addCleanup() accepts regular functions # but addAsyncCleanup() accepts coroutines # # We intentionally don't add inspect.iscoroutinefunction() check # for func argument because there is no way # to check for async function reliably: # 1. It can be "async def func()" iself # 2. Class can implement "async def __call__()" method # 3. Regular "def func()" that returns awaitable object self.addCleanup(*(func, *args), **kwargs) def _callSetUp(self): self.setUp() self._callAsync(self.asyncSetUp) def _callTestMethod(self, method): self._callMaybeAsync(method) def _callTearDown(self): self._callAsync(self.asyncTearDown) self.tearDown() def _callCleanup(self, function, *args, **kwargs): self._callMaybeAsync(function, *args, **kwargs) def _callAsync(self, func, /, *args, **kwargs): assert self._asyncioTestLoop is not None ret = func(*args, **kwargs) assert inspect.isawaitable(ret) fut = self._asyncioTestLoop.create_future() self._asyncioCallsQueue.put_nowait((fut, ret)) return self._asyncioTestLoop.run_until_complete(fut) def _callMaybeAsync(self, func, /, *args, **kwargs): assert self._asyncioTestLoop is not None ret = func(*args, **kwargs) if inspect.isawaitable(ret): fut = self._asyncioTestLoop.create_future() self._asyncioCallsQueue.put_nowait((fut, ret)) return self._asyncioTestLoop.run_until_complete(fut) else: return ret async def _asyncioLoopRunner(self, fut): self._asyncioCallsQueue = queue = asyncio.Queue() fut.set_result(None) while True: query = await queue.get() queue.task_done() if query is None: return fut, awaitable = query try: ret = await awaitable if not fut.cancelled(): fut.set_result(ret) except (SystemExit, KeyboardInterrupt): raise except (BaseException, asyncio.CancelledError) as ex: if not fut.cancelled(): fut.set_exception(ex) def _setupAsyncioLoop(self): assert self._asyncioTestLoop is None loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop.set_debug(True) self._asyncioTestLoop = loop fut = loop.create_future() self._asyncioCallsTask = loop.create_task(self._asyncioLoopRunner(fut)) loop.run_until_complete(fut) def _tearDownAsyncioLoop(self): assert self._asyncioTestLoop is not None loop = self._asyncioTestLoop self._asyncioTestLoop = None self._asyncioCallsQueue.put_nowait(None) loop.run_until_complete(self._asyncioCallsQueue.join()) try: # cancel all tasks to_cancel = asyncio.all_tasks(loop) if not to_cancel: return for task in to_cancel: task.cancel() loop.run_until_complete( asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)) for task in to_cancel: if task.cancelled(): continue if task.exception() is not None: loop.call_exception_handler({ 'message': 'unhandled exception during test shutdown', 'exception': task.exception(), 'task': task, }) # shutdown asyncgens loop.run_until_complete(loop.shutdown_asyncgens()) finally: asyncio.set_event_loop(None) loop.close() def run(self, result=None): self._setupAsyncioLoop() try: return super().run(result) finally: self._tearDownAsyncioLoop() PK �q�Z��W W runner.pynu �[��� """Running tests""" import sys import time import warnings from . import result from .signals import registerResult __unittest = True class _WritelnDecorator(object): """Used to decorate file-like objects with a handy 'writeln' method""" def __init__(self,stream): self.stream = stream def __getattr__(self, attr): if attr in ('stream', '__getstate__'): raise AttributeError(attr) return getattr(self.stream,attr) def writeln(self, arg=None): if arg: self.write(arg) self.write('\n') # text-mode streams translate to \r\n if needed class TextTestResult(result.TestResult): """A test result class that can print formatted text results to a stream. Used by TextTestRunner. """ separator1 = '=' * 70 separator2 = '-' * 70 def __init__(self, stream, descriptions, verbosity): super(TextTestResult, self).__init__(stream, descriptions, verbosity) self.stream = stream self.showAll = verbosity > 1 self.dots = verbosity == 1 self.descriptions = descriptions def getDescription(self, test): doc_first_line = test.shortDescription() if self.descriptions and doc_first_line: return '\n'.join((str(test), doc_first_line)) else: return str(test) def startTest(self, test): super(TextTestResult, self).startTest(test) if self.showAll: self.stream.write(self.getDescription(test)) self.stream.write(" ... ") self.stream.flush() def addSuccess(self, test): super(TextTestResult, self).addSuccess(test) if self.showAll: self.stream.writeln("ok") elif self.dots: self.stream.write('.') self.stream.flush() def addError(self, test, err): super(TextTestResult, self).addError(test, err) if self.showAll: self.stream.writeln("ERROR") elif self.dots: self.stream.write('E') self.stream.flush() def addFailure(self, test, err): super(TextTestResult, self).addFailure(test, err) if self.showAll: self.stream.writeln("FAIL") elif self.dots: self.stream.write('F') self.stream.flush() def addSkip(self, test, reason): super(TextTestResult, self).addSkip(test, reason) if self.showAll: self.stream.writeln("skipped {0!r}".format(reason)) elif self.dots: self.stream.write("s") self.stream.flush() def addExpectedFailure(self, test, err): super(TextTestResult, self).addExpectedFailure(test, err) if self.showAll: self.stream.writeln("expected failure") elif self.dots: self.stream.write("x") self.stream.flush() def addUnexpectedSuccess(self, test): super(TextTestResult, self).addUnexpectedSuccess(test) if self.showAll: self.stream.writeln("unexpected success") elif self.dots: self.stream.write("u") self.stream.flush() def printErrors(self): if self.dots or self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavour, errors): for test, err in errors: self.stream.writeln(self.separator1) self.stream.writeln("%s: %s" % (flavour,self.getDescription(test))) self.stream.writeln(self.separator2) self.stream.writeln("%s" % err) class TextTestRunner(object): """A test runner class that displays results in textual form. It prints out the names of tests as they are run, errors as they occur, and a summary of the results at the end of the test run. """ resultclass = TextTestResult def __init__(self, stream=None, descriptions=True, verbosity=1, failfast=False, buffer=False, resultclass=None, warnings=None, *, tb_locals=False): """Construct a TextTestRunner. Subclasses should accept **kwargs to ensure compatibility as the interface changes. """ if stream is None: stream = sys.stderr self.stream = _WritelnDecorator(stream) self.descriptions = descriptions self.verbosity = verbosity self.failfast = failfast self.buffer = buffer self.tb_locals = tb_locals self.warnings = warnings if resultclass is not None: self.resultclass = resultclass def _makeResult(self): return self.resultclass(self.stream, self.descriptions, self.verbosity) def run(self, test): "Run the given test case or test suite." result = self._makeResult() registerResult(result) result.failfast = self.failfast result.buffer = self.buffer result.tb_locals = self.tb_locals with warnings.catch_warnings(): if self.warnings: # if self.warnings is set, use it to filter all the warnings warnings.simplefilter(self.warnings) # if the filter is 'default' or 'always', special-case the # warnings from the deprecated unittest methods to show them # no more than once per module, because they can be fairly # noisy. The -Wd and -Wa flags can be used to bypass this # only when self.warnings is None. if self.warnings in ['default', 'always']: warnings.filterwarnings('module', category=DeprecationWarning, message=r'Please use assert\w+ instead.') startTime = time.perf_counter() startTestRun = getattr(result, 'startTestRun', None) if startTestRun is not None: startTestRun() try: test(result) finally: stopTestRun = getattr(result, 'stopTestRun', None) if stopTestRun is not None: stopTestRun() stopTime = time.perf_counter() timeTaken = stopTime - startTime result.printErrors() if hasattr(result, 'separator2'): self.stream.writeln(result.separator2) run = result.testsRun self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)) self.stream.writeln() expectedFails = unexpectedSuccesses = skipped = 0 try: results = map(len, (result.expectedFailures, result.unexpectedSuccesses, result.skipped)) except AttributeError: pass else: expectedFails, unexpectedSuccesses, skipped = results infos = [] if not result.wasSuccessful(): self.stream.write("FAILED") failed, errored = len(result.failures), len(result.errors) if failed: infos.append("failures=%d" % failed) if errored: infos.append("errors=%d" % errored) else: self.stream.write("OK") if skipped: infos.append("skipped=%d" % skipped) if expectedFails: infos.append("expected failures=%d" % expectedFails) if unexpectedSuccesses: infos.append("unexpected successes=%d" % unexpectedSuccesses) if infos: self.stream.writeln(" (%s)" % (", ".join(infos),)) else: self.stream.write("\n") return result PK �q�Z2��|2 2 suite.pynu �[��� """TestSuite""" import sys from . import case from . import util __unittest = True def _call_if_exists(parent, attr): func = getattr(parent, attr, lambda: None) func() class BaseTestSuite(object): """A simple test suite that doesn't provide class or module shared fixtures. """ _cleanup = True def __init__(self, tests=()): self._tests = [] self._removed_tests = 0 self.addTests(tests) def __repr__(self): return "<%s tests=%s>" % (util.strclass(self.__class__), list(self)) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return list(self) == list(other) def __iter__(self): return iter(self._tests) def countTestCases(self): cases = self._removed_tests for test in self: if test: cases += test.countTestCases() return cases def addTest(self, test): # sanity checks if not callable(test): raise TypeError("{} is not callable".format(repr(test))) if isinstance(test, type) and issubclass(test, (case.TestCase, TestSuite)): raise TypeError("TestCases and TestSuites must be instantiated " "before passing them to addTest()") self._tests.append(test) def addTests(self, tests): if isinstance(tests, str): raise TypeError("tests must be an iterable of tests, not a string") for test in tests: self.addTest(test) def run(self, result): for index, test in enumerate(self): if result.shouldStop: break test(result) if self._cleanup: self._removeTestAtIndex(index) return result def _removeTestAtIndex(self, index): """Stop holding a reference to the TestCase at index.""" try: test = self._tests[index] except TypeError: # support for suite implementations that have overridden self._tests pass else: # Some unittest tests add non TestCase/TestSuite objects to # the suite. if hasattr(test, 'countTestCases'): self._removed_tests += test.countTestCases() self._tests[index] = None def __call__(self, *args, **kwds): return self.run(*args, **kwds) def debug(self): """Run the tests without collecting errors in a TestResult""" for test in self: test.debug() class TestSuite(BaseTestSuite): """A test suite is a composite test consisting of a number of TestCases. For use, create an instance of TestSuite, then add test case instances. When all tests have been added, the suite can be passed to a test runner, such as TextTestRunner. It will run the individual test cases in the order in which they were added, aggregating the results. When subclassing, do not forget to call the base class constructor. """ def run(self, result, debug=False): topLevel = False if getattr(result, '_testRunEntered', False) is False: result._testRunEntered = topLevel = True for index, test in enumerate(self): if result.shouldStop: break if _isnotsuite(test): self._tearDownPreviousClass(test, result) self._handleModuleFixture(test, result) self._handleClassSetUp(test, result) result._previousTestClass = test.__class__ if (getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False)): continue if not debug: test(result) else: test.debug() if self._cleanup: self._removeTestAtIndex(index) if topLevel: self._tearDownPreviousClass(None, result) self._handleModuleTearDown(result) result._testRunEntered = False return result def debug(self): """Run the tests without collecting errors in a TestResult""" debug = _DebugResult() self.run(debug, True) ################################ def _handleClassSetUp(self, test, result): previousClass = getattr(result, '_previousTestClass', None) currentClass = test.__class__ if currentClass == previousClass: return if result._moduleSetUpFailed: return if getattr(currentClass, "__unittest_skip__", False): return try: currentClass._classSetupFailed = False except TypeError: # test may actually be a function # so its class will be a builtin-type pass setUpClass = getattr(currentClass, 'setUpClass', None) if setUpClass is not None: _call_if_exists(result, '_setupStdout') try: setUpClass() except Exception as e: if isinstance(result, _DebugResult): raise currentClass._classSetupFailed = True className = util.strclass(currentClass) self._createClassOrModuleLevelException(result, e, 'setUpClass', className) finally: _call_if_exists(result, '_restoreStdout') if currentClass._classSetupFailed is True: currentClass.doClassCleanups() if len(currentClass.tearDown_exceptions) > 0: for exc in currentClass.tearDown_exceptions: self._createClassOrModuleLevelException( result, exc[1], 'setUpClass', className, info=exc) def _get_previous_module(self, result): previousModule = None previousClass = getattr(result, '_previousTestClass', None) if previousClass is not None: previousModule = previousClass.__module__ return previousModule def _handleModuleFixture(self, test, result): previousModule = self._get_previous_module(result) currentModule = test.__class__.__module__ if currentModule == previousModule: return self._handleModuleTearDown(result) result._moduleSetUpFailed = False try: module = sys.modules[currentModule] except KeyError: return setUpModule = getattr(module, 'setUpModule', None) if setUpModule is not None: _call_if_exists(result, '_setupStdout') try: setUpModule() except Exception as e: try: case.doModuleCleanups() except Exception as exc: self._createClassOrModuleLevelException(result, exc, 'setUpModule', currentModule) if isinstance(result, _DebugResult): raise result._moduleSetUpFailed = True self._createClassOrModuleLevelException(result, e, 'setUpModule', currentModule) finally: _call_if_exists(result, '_restoreStdout') def _createClassOrModuleLevelException(self, result, exc, method_name, parent, info=None): errorName = f'{method_name} ({parent})' self._addClassOrModuleLevelException(result, exc, errorName, info) def _addClassOrModuleLevelException(self, result, exception, errorName, info=None): error = _ErrorHolder(errorName) addSkip = getattr(result, 'addSkip', None) if addSkip is not None and isinstance(exception, case.SkipTest): addSkip(error, str(exception)) else: if not info: result.addError(error, sys.exc_info()) else: result.addError(error, info) def _handleModuleTearDown(self, result): previousModule = self._get_previous_module(result) if previousModule is None: return if result._moduleSetUpFailed: return try: module = sys.modules[previousModule] except KeyError: return tearDownModule = getattr(module, 'tearDownModule', None) if tearDownModule is not None: _call_if_exists(result, '_setupStdout') try: tearDownModule() except Exception as e: if isinstance(result, _DebugResult): raise self._createClassOrModuleLevelException(result, e, 'tearDownModule', previousModule) finally: _call_if_exists(result, '_restoreStdout') try: case.doModuleCleanups() except Exception as e: self._createClassOrModuleLevelException(result, e, 'tearDownModule', previousModule) def _tearDownPreviousClass(self, test, result): previousClass = getattr(result, '_previousTestClass', None) currentClass = test.__class__ if currentClass == previousClass: return if getattr(previousClass, '_classSetupFailed', False): return if getattr(result, '_moduleSetUpFailed', False): return if getattr(previousClass, "__unittest_skip__", False): return tearDownClass = getattr(previousClass, 'tearDownClass', None) if tearDownClass is not None: _call_if_exists(result, '_setupStdout') try: tearDownClass() except Exception as e: if isinstance(result, _DebugResult): raise className = util.strclass(previousClass) self._createClassOrModuleLevelException(result, e, 'tearDownClass', className) finally: _call_if_exists(result, '_restoreStdout') previousClass.doClassCleanups() if len(previousClass.tearDown_exceptions) > 0: for exc in previousClass.tearDown_exceptions: className = util.strclass(previousClass) self._createClassOrModuleLevelException(result, exc[1], 'tearDownClass', className, info=exc) class _ErrorHolder(object): """ Placeholder for a TestCase inside a result. As far as a TestResult is concerned, this looks exactly like a unit test. Used to insert arbitrary errors into a test suite run. """ # Inspired by the ErrorHolder from Twisted: # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py # attribute used by TestResult._exc_info_to_string failureException = None def __init__(self, description): self.description = description def id(self): return self.description def shortDescription(self): return None def __repr__(self): return "<ErrorHolder description=%r>" % (self.description,) def __str__(self): return self.id() def run(self, result): # could call result.addError(...) - but this test-like object # shouldn't be run anyway pass def __call__(self, result): return self.run(result) def countTestCases(self): return 0 def _isnotsuite(test): "A crude way to tell apart testcases and suites with duck-typing" try: iter(test) except TypeError: return True return False class _DebugResult(object): "Used by the TestSuite to hold previous class when running in debug." _previousTestClass = None _moduleSetUpFailed = False shouldStop = False PK �q�Z[[$� result.pynu �[��� """Test result object""" import io import sys import traceback from . import util from functools import wraps __unittest = True def failfast(method): @wraps(method) def inner(self, *args, **kw): if getattr(self, 'failfast', False): self.stop() return method(self, *args, **kw) return inner STDOUT_LINE = '\nStdout:\n%s' STDERR_LINE = '\nStderr:\n%s' class TestResult(object): """Holder for test result information. Test results are automatically managed by the TestCase and TestSuite classes, and do not need to be explicitly manipulated by writers of tests. Each instance holds the total number of tests run, and collections of failures and errors that occurred among those test runs. The collections contain tuples of (testcase, exceptioninfo), where exceptioninfo is the formatted traceback of the error that occurred. """ _previousTestClass = None _testRunEntered = False _moduleSetUpFailed = False def __init__(self, stream=None, descriptions=None, verbosity=None): self.failfast = False self.failures = [] self.errors = [] self.testsRun = 0 self.skipped = [] self.expectedFailures = [] self.unexpectedSuccesses = [] self.shouldStop = False self.buffer = False self.tb_locals = False self._stdout_buffer = None self._stderr_buffer = None self._original_stdout = sys.stdout self._original_stderr = sys.stderr self._mirrorOutput = False def printErrors(self): "Called by TestRunner after test run" def startTest(self, test): "Called when the given test is about to be run" self.testsRun += 1 self._mirrorOutput = False self._setupStdout() def _setupStdout(self): if self.buffer: if self._stderr_buffer is None: self._stderr_buffer = io.StringIO() self._stdout_buffer = io.StringIO() sys.stdout = self._stdout_buffer sys.stderr = self._stderr_buffer def startTestRun(self): """Called once before any tests are executed. See startTest for a method called before each test. """ def stopTest(self, test): """Called when the given test has been run""" self._restoreStdout() self._mirrorOutput = False def _restoreStdout(self): if self.buffer: if self._mirrorOutput: output = sys.stdout.getvalue() error = sys.stderr.getvalue() if output: if not output.endswith('\n'): output += '\n' self._original_stdout.write(STDOUT_LINE % output) if error: if not error.endswith('\n'): error += '\n' self._original_stderr.write(STDERR_LINE % error) sys.stdout = self._original_stdout sys.stderr = self._original_stderr self._stdout_buffer.seek(0) self._stdout_buffer.truncate() self._stderr_buffer.seek(0) self._stderr_buffer.truncate() def stopTestRun(self): """Called once after all tests are executed. See stopTest for a method called after each test. """ @failfast def addError(self, test, err): """Called when an error has occurred. 'err' is a tuple of values as returned by sys.exc_info(). """ self.errors.append((test, self._exc_info_to_string(err, test))) self._mirrorOutput = True @failfast def addFailure(self, test, err): """Called when an error has occurred. 'err' is a tuple of values as returned by sys.exc_info().""" self.failures.append((test, self._exc_info_to_string(err, test))) self._mirrorOutput = True def addSubTest(self, test, subtest, err): """Called at the end of a subtest. 'err' is None if the subtest ended successfully, otherwise it's a tuple of values as returned by sys.exc_info(). """ # By default, we don't do anything with successful subtests, but # more sophisticated test results might want to record them. if err is not None: if getattr(self, 'failfast', False): self.stop() if issubclass(err[0], test.failureException): errors = self.failures else: errors = self.errors errors.append((subtest, self._exc_info_to_string(err, test))) self._mirrorOutput = True def addSuccess(self, test): "Called when a test has completed successfully" pass def addSkip(self, test, reason): """Called when a test is skipped.""" self.skipped.append((test, reason)) def addExpectedFailure(self, test, err): """Called when an expected failure/error occurred.""" self.expectedFailures.append( (test, self._exc_info_to_string(err, test))) @failfast def addUnexpectedSuccess(self, test): """Called when a test was expected to fail, but succeed.""" self.unexpectedSuccesses.append(test) def wasSuccessful(self): """Tells whether or not this result was a success.""" # The hasattr check is for test_result's OldResult test. That # way this method works on objects that lack the attribute. # (where would such result intances come from? old stored pickles?) return ((len(self.failures) == len(self.errors) == 0) and (not hasattr(self, 'unexpectedSuccesses') or len(self.unexpectedSuccesses) == 0)) def stop(self): """Indicates that the tests should be aborted.""" self.shouldStop = True def _exc_info_to_string(self, err, test): """Converts a sys.exc_info()-style tuple of values into a string.""" exctype, value, tb = err # Skip test runner traceback levels while tb and self._is_relevant_tb_level(tb): tb = tb.tb_next if exctype is test.failureException: # Skip assert*() traceback levels length = self._count_relevant_tb_levels(tb) else: length = None tb_e = traceback.TracebackException( exctype, value, tb, limit=length, capture_locals=self.tb_locals) msgLines = list(tb_e.format()) if self.buffer: output = sys.stdout.getvalue() error = sys.stderr.getvalue() if output: if not output.endswith('\n'): output += '\n' msgLines.append(STDOUT_LINE % output) if error: if not error.endswith('\n'): error += '\n' msgLines.append(STDERR_LINE % error) return ''.join(msgLines) def _is_relevant_tb_level(self, tb): return '__unittest' in tb.tb_frame.f_globals def _count_relevant_tb_levels(self, tb): length = 0 while tb and not self._is_relevant_tb_level(tb): length += 1 tb = tb.tb_next return length def __repr__(self): return ("<%s run=%i errors=%i failures=%i>" % (util.strclass(self.__class__), self.testsRun, len(self.errors), len(self.failures))) PK �q�Z��F�8 8 # __pycache__/__init__.cpython-38.pycnu �[��� U e5d� � @ s� d Z ddddddddd d ddd dddddddgZe�dddg� dZddlmZ ddlmZ ddlm Z m Z mZmZm Z mZmZmZ ddlmZmZ ddlmZmZmZmZmZ ddlmZmZ dd lmZmZ dd!lm Z m!Z!m"Z"m#Z# eZ$d"d#� Z%d$S )%a� Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's Smalltalk testing framework (used with permission). This module contains the core framework classes that form the basis of specific test cases and suites (TestCase, TestSuite etc.), and also a text-based utility class for running the tests and reporting the results (TextTestRunner). Simple usage: import unittest class IntegerArithmeticTestCase(unittest.TestCase): def testAdd(self): # test method names begin with 'test' self.assertEqual((1 + 2), 3) self.assertEqual(0 + 1, 1) def testMultiply(self): self.assertEqual((0 * 10), 0) self.assertEqual((5 * 8), 40) if __name__ == '__main__': unittest.main() Further information is available in the bundled documentation, and from http://docs.python.org/library/unittest.html Copyright (c) 1999-2003 Steve Purcell Copyright (c) 2003-2010 Python Software Foundation This module is free software, and you may redistribute it and/or modify it under the same terms as Python itself, so long as this copyright message and disclaimer are retained in their original form. IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. � TestResult�TestCase�IsolatedAsyncioTestCase� TestSuite�TextTestRunner� TestLoader�FunctionTestCase�main�defaultTestLoader�SkipTest�skip�skipIf� skipUnless�expectedFailure�TextTestResult�installHandler�registerResult�removeResult� removeHandler�addModuleCleanup�getTestCaseNames� makeSuite� findTestCasesT� )r )r )r r r r r r r r )� BaseTestSuiter )r r r r r )�TestProgramr )r r )r r r r c C s"