Contents of /home/creaupfw/public_html/wp-includes/assets/asyncio.tar:
===== protocols.py =====

"""Abstract Protocol base classes."""

__all__ = (
    'BaseProtocol', 'Protocol', 'DatagramProtocol',
    'SubprocessProtocol', 'BufferedProtocol',
)


class BaseProtocol:
    """Common base class for protocol interfaces.

    Usually a user implements protocols derived from BaseProtocol,
    such as Protocol or ProcessProtocol.

    The only case where BaseProtocol should be implemented directly is
    a write-only transport, such as a write pipe.
    """

    __slots__ = ()

    def connection_made(self, transport):
        """Called when a connection is made.

        The argument is the transport representing the pipe connection.
        To receive data, wait for data_received() calls.
        When the connection is closed, connection_lost() is called.
        """

    def connection_lost(self, exc):
        """Called when the connection is lost or closed.

        The argument is an exception object or None (the latter
        meaning a regular EOF is received or the connection was
        aborted or closed).
        """

    def pause_writing(self):
        """Called when the transport's buffer goes over the high-water mark.

        Pause and resume calls are paired -- pause_writing() is called
        once when the buffer goes strictly over the high-water mark
        (even if subsequent writes increase the buffer size even more),
        and eventually resume_writing() is called once when the buffer
        size reaches the low-water mark.

        Note that if the buffer size equals the high-water mark,
        pause_writing() is not called -- it must go strictly over.
        Conversely, resume_writing() is called when the buffer size is
        equal or lower than the low-water mark.  These end conditions
        are important to ensure that things go as expected when either
        mark is zero.

        NOTE: This is the only Protocol callback that is not called
        through EventLoop.call_soon() -- if it were, it would have no
        effect when it's most needed (when the app keeps writing
        without yielding until pause_writing() is called).
        """

    def resume_writing(self):
        """Called when the transport's buffer drains below the low-water mark.

        See pause_writing() for details.
        """


class Protocol(BaseProtocol):
    """Interface for stream protocol.

    The user should implement this interface.  They can inherit from
    this class but don't need to.  The implementations here do
    nothing (they don't raise exceptions).

    When the user wants to request a transport, they pass a protocol
    factory to a utility function (e.g., EventLoop.create_connection()).

    When the connection is made successfully, connection_made() is
    called with a suitable transport object.  Then data_received()
    will be called 0 or more times with data (bytes) received from the
    transport; finally, connection_lost() will be called exactly once
    with either an exception object or None as an argument.

    State machine of calls:

      start -> CM [-> DR*] [-> ER?] -> CL -> end

    * CM: connection_made()
    * DR: data_received()
    * ER: eof_received()
    * CL: connection_lost()
    """

    __slots__ = ()

    def data_received(self, data):
        """Called when some data is received.

        The argument is a bytes object.
        """

    def eof_received(self):
        """Called when the other end calls write_eof() or equivalent.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """


class BufferedProtocol(BaseProtocol):
    """Interface for stream protocol with manual buffer control.

    Important: this has been added to asyncio in Python 3.7
    *on a provisional basis*!  Consider it as an experimental API that
    might be changed or removed in Python 3.8.

    Event methods, such as `create_server` and `create_connection`,
    accept factories that return protocols that implement this
    interface.

    The idea of BufferedProtocol is that it allows the protocol to
    manually allocate and control the receive buffer.  Event loops can
    then use the buffer provided by the protocol to avoid unnecessary
    data copies.  This can result in noticeable performance
    improvement for protocols that receive big amounts of data.
    Sophisticated protocols can allocate the buffer only once at
    creation time.

    State machine of calls:

      start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end

    * CM: connection_made()
    * GB: get_buffer()
    * BU: buffer_updated()
    * ER: eof_received()
    * CL: connection_lost()
    """

    __slots__ = ()

    def get_buffer(self, sizehint):
        """Called to allocate a new receive buffer.

        *sizehint* is a recommended minimal size for the returned
        buffer.  When set to -1, the buffer size can be arbitrary.

        Must return an object that implements the
        :ref:`buffer protocol <bufferobjects>`.
        It is an error to return a zero-sized buffer.
        """

    def buffer_updated(self, nbytes):
        """Called when the buffer was updated with the received data.

        *nbytes* is the total number of bytes that were written to
        the buffer.
        """

    def eof_received(self):
        """Called when the other end calls write_eof() or equivalent.

        If this returns a false value (including None), the transport
        will close itself.  If it returns a true value, closing the
        transport is up to the protocol.
        """


class DatagramProtocol(BaseProtocol):
    """Interface for datagram protocol."""

    __slots__ = ()

    def datagram_received(self, data, addr):
        """Called when some datagram is received."""

    def error_received(self, exc):
        """Called when a send or receive operation raises an OSError.

        (Other than BlockingIOError or InterruptedError.)
        """


class SubprocessProtocol(BaseProtocol):
    """Interface for protocol for subprocess calls."""

    __slots__ = ()

    def pipe_data_received(self, fd, data):
        """Called when the subprocess writes data into stdout/stderr pipe.

        fd is int file descriptor.
        data is bytes object.
        """

    def pipe_connection_lost(self, fd, exc):
        """Called when a file descriptor associated with the child
        process is closed.

        fd is the int file descriptor that was closed.
        """

    def process_exited(self):
        """Called when subprocess has exited."""


def _feed_data_to_buffered_proto(proto, data):
    data_len = len(data)
    while data_len:
        buf = proto.get_buffer(data_len)
        buf_len = len(buf)
        if not buf_len:
            raise RuntimeError('get_buffer() returned an empty buffer')

        if buf_len >= data_len:
            buf[:data_len] = data
            proto.buffer_updated(data_len)
            return
        else:
            buf[:buf_len] = data[:buf_len]
            proto.buffer_updated(buf_len)
            data = data[buf_len:]
            data_len = len(data)
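A minimal sketch (not part of the archive) showing the CM -> DR* -> CL state machine documented above in use; the class name, message, and the 127.0.0.1:8888 endpoint are invented for illustration:

# Example: a minimal stream protocol exercising the state machine.
import asyncio

class EchoClientProtocol(asyncio.Protocol):
    def __init__(self, message, on_lost):
        self.message = message
        self.on_lost = on_lost        # future resolved in connection_lost()

    def connection_made(self, transport):
        transport.write(self.message.encode())    # CM: transport is ready

    def data_received(self, data):
        print('received:', data.decode())          # DR: called 0+ times

    def connection_lost(self, exc):
        self.on_lost.set_result(True)              # CL: called exactly once

async def main():
    loop = asyncio.get_running_loop()
    on_lost = loop.create_future()
    transport, protocol = await loop.create_connection(
        lambda: EchoClientProtocol('hello', on_lost), '127.0.0.1', 8888)
    try:
        await on_lost
    finally:
        transport.close()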
===== exceptions.py =====

"""asyncio exceptions."""

__all__ = ('CancelledError', 'InvalidStateError', 'TimeoutError',
           'IncompleteReadError', 'LimitOverrunError',
           'SendfileNotAvailableError')


class CancelledError(BaseException):
    """The Future or Task was cancelled."""


class TimeoutError(Exception):
    """The operation exceeded the given deadline."""


class InvalidStateError(Exception):
    """The operation is not allowed in this state."""


class SendfileNotAvailableError(RuntimeError):
    """Sendfile syscall is not available.

    Raised if OS does not support sendfile syscall for given socket or
    file type.
    """


class IncompleteReadError(EOFError):
    """
    Incomplete read error.

    Attributes:
    - partial: read bytes string before the end of stream was reached
    - expected: total number of expected bytes (or None if unknown)
    """
    def __init__(self, partial, expected):
        r_expected = 'undefined' if expected is None else repr(expected)
        super().__init__(f'{len(partial)} bytes read on a total of '
                         f'{r_expected} expected bytes')
        self.partial = partial
        self.expected = expected

    def __reduce__(self):
        return type(self), (self.partial, self.expected)


class LimitOverrunError(Exception):
    """Reached the buffer limit while looking for a separator.

    Attributes:
    - consumed: total number of bytes to be consumed.
    """
    def __init__(self, message, consumed):
        super().__init__(message)
        self.consumed = consumed

    def __reduce__(self):
        return type(self), (self.args[0], self.consumed)
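A short sketch (not part of the archive) of how IncompleteReadError typically surfaces; the read_header name and 8-byte header size are invented for illustration:

# Example: IncompleteReadError carries .partial and .expected.
import asyncio

async def read_header(reader: asyncio.StreamReader) -> bytes:
    try:
        return await reader.readexactly(8)      # fixed-size header
    except asyncio.IncompleteReadError as exc:
        # exc.partial holds whatever was read; exc.expected is 8 here.
        raise ConnectionError(
            f'stream ended after {len(exc.partial)} of '
            f'{exc.expected} expected bytes') from exc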
===== __init__.py =====

"""The asyncio package, tracking PEP 3156."""

# flake8: noqa

import sys

# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .exceptions import *
from .futures import *
from .locks import *
from .protocols import *
from .runners import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .transports import *

# Exposed for _asynciomodule.c to implement now deprecated
# Task.all_tasks() method.  This function will be removed in 3.9.
from .tasks import _all_tasks_compat  # NoQA

__all__ = (base_events.__all__ +
           coroutines.__all__ +
           events.__all__ +
           exceptions.__all__ +
           futures.__all__ +
           locks.__all__ +
           protocols.__all__ +
           runners.__all__ +
           queues.__all__ +
           streams.__all__ +
           subprocess.__all__ +
           tasks.__all__ +
           transports.__all__)

if sys.platform == 'win32':  # pragma: no cover
    from .windows_events import *
    __all__ += windows_events.__all__
else:
    from .unix_events import *  # pragma: no cover
    __all__ += unix_events.__all__
===== __main__.py =====

import ast
import asyncio
import code
import concurrent.futures
import inspect
import sys
import threading
import types
import warnings

from . import futures


class AsyncIOInteractiveConsole(code.InteractiveConsole):

    def __init__(self, locals, loop):
        super().__init__(locals)
        self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT

        self.loop = loop

    def runcode(self, code):
        future = concurrent.futures.Future()

        def callback():
            global repl_future
            global repl_future_interrupted

            repl_future = None
            repl_future_interrupted = False

            func = types.FunctionType(code, self.locals)
            try:
                coro = func()
            except SystemExit:
                raise
            except KeyboardInterrupt as ex:
                repl_future_interrupted = True
                future.set_exception(ex)
                return
            except BaseException as ex:
                future.set_exception(ex)
                return

            if not inspect.iscoroutine(coro):
                future.set_result(coro)
                return

            try:
                repl_future = self.loop.create_task(coro)
                futures._chain_future(repl_future, future)
            except BaseException as exc:
                future.set_exception(exc)

        loop.call_soon_threadsafe(callback)

        try:
            return future.result()
        except SystemExit:
            raise
        except BaseException:
            if repl_future_interrupted:
                self.write("\nKeyboardInterrupt\n")
            else:
                self.showtraceback()


class REPLThread(threading.Thread):

    def run(self):
        try:
            banner = (
                f'asyncio REPL {sys.version} on {sys.platform}\n'
                f'Use "await" directly instead of "asyncio.run()".\n'
                f'Type "help", "copyright", "credits" or "license" '
                f'for more information.\n'
                f'{getattr(sys, "ps1", ">>> ")}import asyncio'
            )

            console.interact(
                banner=banner,
                exitmsg='exiting asyncio REPL...')
        finally:
            warnings.filterwarnings(
                'ignore',
                message=r'^coroutine .* was never awaited$',
                category=RuntimeWarning)

            loop.call_soon_threadsafe(loop.stop)


if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    repl_locals = {'asyncio': asyncio}
    for key in {'__name__', '__package__',
                '__loader__', '__spec__',
                '__builtins__', '__file__'}:
        repl_locals[key] = locals()[key]

    console = AsyncIOInteractiveConsole(repl_locals, loop)

    repl_future = None
    repl_future_interrupted = False

    try:
        import readline  # NoQA
    except ImportError:
        pass

    repl_thread = REPLThread()
    repl_thread.daemon = True
    repl_thread.start()

    while True:
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            if repl_future and not repl_future.done():
                repl_future.cancel()
                repl_future_interrupted = True
            continue
        else:
            break
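A compile-only sketch (not part of the archive) of the PyCF_ALLOW_TOP_LEVEL_AWAIT flag that the console above sets; nothing here is executed, so no event loop is needed:

# Example: top-level await makes compile() return a coroutine code object.
import ast
import inspect

code_obj = compile('await asyncio.sleep(0)', '<repl>', 'single',
                   flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
# With the flag set, code containing a top-level await is compiled
# with the CO_COROUTINE flag, which is how runcode() detects it.
print(bool(code_obj.co_flags & inspect.CO_COROUTINE))  # True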
===== coroutines.py =====

__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine'

import collections.abc
import functools
import inspect
import os
import sys
import traceback
import types
import warnings

from . import base_futures
from . import constants
from . import format_helpers
from .log import logger


def _is_debug_mode():
    # If you set _DEBUG to true, @coroutine will wrap the resulting
    # generator objects in a CoroWrapper instance (defined below).  That
    # instance will log a message when the generator is never iterated
    # over, which may happen when you forget to use "await" or "yield from"
    # with a coroutine call.
    # Note that the value of the _DEBUG flag is taken
    # when the decorator is used, so to be of any use it must be set
    # before you define your coroutines.  A downside of using this feature
    # is that tracebacks show entries for the CoroWrapper.__next__ method
    # when _DEBUG is true.
    return sys.flags.dev_mode or (not sys.flags.ignore_environment and
                                  bool(os.environ.get('PYTHONASYNCIODEBUG')))


_DEBUG = _is_debug_mode()


class CoroWrapper:
    # Wrapper for coroutine object in _DEBUG mode.

    def __init__(self, gen, func=None):
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        self._source_traceback = format_helpers.extract_stack(sys._getframe(1))
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            frame = self._source_traceback[-1]
            coro_repr += f', created at {frame[0]}:{frame[1]}'

        return f'<{self.__class__.__name__} {coro_repr}>'

    def __iter__(self):
        return self

    def __next__(self):
        return self.gen.send(None)

    def send(self, value):
        return self.gen.send(value)

    def throw(self, type, value=None, traceback=None):
        return self.gen.throw(type, value, traceback)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    def __await__(self):
        return self

    @property
    def gi_yieldfrom(self):
        return self.gen.gi_yieldfrom

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        if frame is not None and frame.f_lasti == -1:
            msg = f'{self!r} was never yielded from'
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += (f'\nCoroutine object created at '
                        f'(most recent call last, truncated to '
                        f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
                msg += tb.rstrip()
            logger.error(msg)


def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, '
                  'use "async def" instead',
                  DeprecationWarning,
                  stacklevel=2)
    if inspect.iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            else:
                # If 'res' is an awaitable, run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, collections.abc.Awaitable):
                        res = yield from await_meth()
            return res

        coro = types.coroutine(coro)

    if not _DEBUG:
        wrapper = coro
    else:
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial) may lack __qualname__.
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
    return wrapper


# A marker for iscoroutinefunction.
_is_coroutine = object()


def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    return (inspect.iscoroutinefunction(func) or
            getattr(func, '_is_coroutine', None) is _is_coroutine)
# Prioritize native coroutine check to speed-up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
                    collections.abc.Coroutine, CoroWrapper)
_iscoroutine_typecache = set()


def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    if type(obj) in _iscoroutine_typecache:
        return True

    if isinstance(obj, _COROUTINE_TYPES):
        # Just in case we don't want to cache more than 100
        # positive types.  That shouldn't ever happen, unless
        # someone is stressing the system on purpose.
        if len(_iscoroutine_typecache) < 100:
            _iscoroutine_typecache.add(type(obj))
        return True
    else:
        return False


def _format_coroutine(coro):
    assert iscoroutine(coro)

    is_corowrapper = isinstance(coro, CoroWrapper)

    def get_name(coro):
        # Coroutines compiled with Cython sometimes don't have
        # proper __qualname__ or __name__.  While that is a bug
        # in Cython, asyncio shouldn't crash with an AttributeError
        # in its __repr__ functions.
        if is_corowrapper:
            return format_helpers._format_callback(coro.func, (), {})

        if hasattr(coro, '__qualname__') and coro.__qualname__:
            coro_name = coro.__qualname__
        elif hasattr(coro, '__name__') and coro.__name__:
            coro_name = coro.__name__
        else:
            # Stop masking Cython bugs, expose them in a friendly way.
            coro_name = f'<{type(coro).__name__} without __name__>'
        return f'{coro_name}()'

    def is_running(coro):
        try:
            return coro.cr_running
        except AttributeError:
            try:
                return coro.gi_running
            except AttributeError:
                return False

    coro_code = None
    if hasattr(coro, 'cr_code') and coro.cr_code:
        coro_code = coro.cr_code
    elif hasattr(coro, 'gi_code') and coro.gi_code:
        coro_code = coro.gi_code

    coro_name = get_name(coro)

    if not coro_code:
        # Built-in types might not have __qualname__ or __name__.
        if is_running(coro):
            return f'{coro_name} running'
        else:
            return coro_name

    coro_frame = None
    if hasattr(coro, 'gi_frame') and coro.gi_frame:
        coro_frame = coro.gi_frame
    elif hasattr(coro, 'cr_frame') and coro.cr_frame:
        coro_frame = coro.cr_frame

    # If Cython's coroutine has a fake code object without proper
    # co_filename -- expose that.
    filename = coro_code.co_filename or '<empty co_filename>'

    lineno = 0
    if (is_corowrapper and
            coro.func is not None and
            not inspect.isgeneratorfunction(coro.func)):
        source = format_helpers._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
        else:
            coro_repr = f'{coro_name} running, defined at {filename}:{lineno}'
    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = f'{coro_name} running at {filename}:{lineno}'
    else:
        lineno = coro_code.co_firstlineno
        coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'

    return coro_repr
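A quick sketch (not part of the archive) of the two public checks defined above; the fetch name is invented for illustration:

# Example: iscoroutinefunction() vs. iscoroutine().
import asyncio

async def fetch():
    return 42

print(asyncio.iscoroutinefunction(fetch))   # True: the function itself
coro = fetch()
print(asyncio.iscoroutine(coro))            # True: the coroutine object
coro.close()   # avoid a "coroutine was never awaited" warning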
===== base_subprocess.py =====

import collections
import subprocess
import warnings

from . import protocols
from . import transports
from .log import logger


class BaseSubprocessTransport(transports.SubprocessTransport):

    def __init__(self, loop, protocol, args, shell,
                 stdin, stdout, stderr, bufsize,
                 waiter=None, extra=None, **kwargs):
        super().__init__(extra)
        self._closed = False
        self._protocol = protocol
        self._loop = loop
        self._proc = None
        self._pid = None
        self._returncode = None
        self._exit_waiters = []
        self._pending_calls = collections.deque()
        self._pipes = {}
        self._finished = False

        if stdin == subprocess.PIPE:
            self._pipes[0] = None
        if stdout == subprocess.PIPE:
            self._pipes[1] = None
        if stderr == subprocess.PIPE:
            self._pipes[2] = None

        # Create the child process: set the _proc attribute
        try:
            self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
                        stderr=stderr, bufsize=bufsize, **kwargs)
        except:
            self.close()
            raise

        self._pid = self._proc.pid
        self._extra['subprocess'] = self._proc

        if self._loop.get_debug():
            if isinstance(args, (bytes, str)):
                program = args
            else:
                program = args[0]
            logger.debug('process %r created: pid %s',
                         program, self._pid)

        self._loop.create_task(self._connect_pipes(waiter))

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._closed:
            info.append('closed')
        if self._pid is not None:
            info.append(f'pid={self._pid}')
        if self._returncode is not None:
            info.append(f'returncode={self._returncode}')
        elif self._pid is not None:
            info.append('running')
        else:
            info.append('not started')

        stdin = self._pipes.get(0)
        if stdin is not None:
            info.append(f'stdin={stdin.pipe}')

        stdout = self._pipes.get(1)
        stderr = self._pipes.get(2)
        if stdout is not None and stderr is stdout:
            info.append(f'stdout=stderr={stdout.pipe}')
        else:
            if stdout is not None:
                info.append(f'stdout={stdout.pipe}')
            if stderr is not None:
                info.append(f'stderr={stderr.pipe}')

        return '<{}>'.format(' '.join(info))

    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        raise NotImplementedError

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closed

    def close(self):
        if self._closed:
            return
        self._closed = True

        for proto in self._pipes.values():
            if proto is None:
                continue
            proto.pipe.close()

        if (self._proc is not None and
                # has the child process finished?
                self._returncode is None and
                # the child process has finished, but the
                # transport hasn't been notified yet?
                self._proc.poll() is None):

            if self._loop.get_debug():
                logger.warning('Close running child process: kill %r', self)

            try:
                self._proc.kill()
            except ProcessLookupError:
                pass

            # Don't clear the _proc reference yet: _post_init() may still run
    def __del__(self, _warn=warnings.warn):
        if not self._closed:
            _warn(f"unclosed transport {self!r}", ResourceWarning,
                  source=self)
            self.close()

    def get_pid(self):
        return self._pid

    def get_returncode(self):
        return self._returncode

    def get_pipe_transport(self, fd):
        if fd in self._pipes:
            return self._pipes[fd].pipe
        else:
            return None

    def _check_proc(self):
        if self._proc is None:
            raise ProcessLookupError()

    def send_signal(self, signal):
        self._check_proc()
        self._proc.send_signal(signal)

    def terminate(self):
        self._check_proc()
        self._proc.terminate()

    def kill(self):
        self._check_proc()
        self._proc.kill()

    async def _connect_pipes(self, waiter):
        try:
            proc = self._proc
            loop = self._loop

            if proc.stdin is not None:
                _, pipe = await loop.connect_write_pipe(
                    lambda: WriteSubprocessPipeProto(self, 0),
                    proc.stdin)
                self._pipes[0] = pipe

            if proc.stdout is not None:
                _, pipe = await loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 1),
                    proc.stdout)
                self._pipes[1] = pipe

            if proc.stderr is not None:
                _, pipe = await loop.connect_read_pipe(
                    lambda: ReadSubprocessPipeProto(self, 2),
                    proc.stderr)
                self._pipes[2] = pipe

            assert self._pending_calls is not None

            loop.call_soon(self._protocol.connection_made, self)
            for callback, data in self._pending_calls:
                loop.call_soon(callback, *data)
            self._pending_calls = None
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            if waiter is not None and not waiter.cancelled():
                waiter.set_exception(exc)
        else:
            if waiter is not None and not waiter.cancelled():
                waiter.set_result(None)

    def _call(self, cb, *data):
        if self._pending_calls is not None:
            self._pending_calls.append((cb, data))
        else:
            self._loop.call_soon(cb, *data)

    def _pipe_connection_lost(self, fd, exc):
        self._call(self._protocol.pipe_connection_lost, fd, exc)
        self._try_finish()

    def _pipe_data_received(self, fd, data):
        self._call(self._protocol.pipe_data_received, fd, data)

    def _process_exited(self, returncode):
        assert returncode is not None, returncode
        assert self._returncode is None, self._returncode
        if self._loop.get_debug():
            logger.info('%r exited with return code %r', self, returncode)
        self._returncode = returncode
        if self._proc.returncode is None:
            # asyncio uses a child watcher: copy the status into the Popen
            # object.  On Python 3.6, it is required to avoid a
            # ResourceWarning.
            self._proc.returncode = returncode
        self._call(self._protocol.process_exited)
        self._try_finish()

        # wake up futures waiting for wait()
        for waiter in self._exit_waiters:
            if not waiter.cancelled():
                waiter.set_result(returncode)
        self._exit_waiters = None

    async def _wait(self):
        """Wait until the process exits and return the process return code.

        This method is a coroutine."""
        if self._returncode is not None:
            return self._returncode

        waiter = self._loop.create_future()
        self._exit_waiters.append(waiter)
        return await waiter
    def _try_finish(self):
        assert not self._finished
        if self._returncode is None:
            return
        if all(p is not None and p.disconnected
               for p in self._pipes.values()):
            self._finished = True
            self._call(self._call_connection_lost, None)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            self._loop = None
            self._proc = None
            self._protocol = None


class WriteSubprocessPipeProto(protocols.BaseProtocol):

    def __init__(self, proc, fd):
        self.proc = proc
        self.fd = fd
        self.pipe = None
        self.disconnected = False

    def connection_made(self, transport):
        self.pipe = transport

    def __repr__(self):
        return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>'

    def connection_lost(self, exc):
        self.disconnected = True
        self.proc._pipe_connection_lost(self.fd, exc)
        self.proc = None

    def pause_writing(self):
        self.proc._protocol.pause_writing()

    def resume_writing(self):
        self.proc._protocol.resume_writing()


class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
                              protocols.Protocol):

    def data_received(self, data):
        self.proc._pipe_data_received(self.fd, data)

===== base_futures.py =====

__all__ = ()

import reprlib
from _thread import get_ident

from . import format_helpers

# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'


def isfuture(obj):
    """Check for a Future.

    This returns True when obj is a Future instance or is advertising
    itself as duck-type compatible by setting _asyncio_future_blocking.
    See comment in Future for more details.
    """
    return (hasattr(obj.__class__, '_asyncio_future_blocking') and
            obj._asyncio_future_blocking is not None)


def _format_callbacks(cb):
    """helper function for Future.__repr__"""
    size = len(cb)
    if not size:
        cb = ''

    def format_cb(callback):
        return format_helpers._format_callback_source(callback, ())

    if size == 1:
        cb = format_cb(cb[0][0])
    elif size == 2:
        cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
    elif size > 2:
        cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
                                        size - 2,
                                        format_cb(cb[-1][0]))
    return f'cb=[{cb}]'


# bpo-42183: _repr_running is needed for repr protection
# when a Future or Task result contains itself directly or indirectly.
# The logic is borrowed from @reprlib.recursive_repr decorator.
# Unfortunately, the direct decorator usage is impossible because of
# AttributeError: '_asyncio.Task' object has no attribute '__module__' error.
#
# After fixing this thing we can return to the decorator based approach.
_repr_running = set()


def _future_repr_info(future):
    # (Future) -> str
    """helper function for Future.__repr__"""
    info = [future._state.lower()]
    if future._state == _FINISHED:
        if future._exception is not None:
            info.append(f'exception={future._exception!r}')
        else:
            key = id(future), get_ident()
            if key in _repr_running:
                result = '...'
            else:
                _repr_running.add(key)
                try:
                    # use reprlib to limit the length of the output,
                    # especially for very long strings
                    result = reprlib.repr(future._result)
                finally:
                    _repr_running.discard(key)
            info.append(f'result={result}')
    if future._callbacks:
        info.append(_format_callbacks(future._callbacks))
    if future._source_traceback:
        frame = future._source_traceback[-1]
        info.append(f'created at {frame[0]}:{frame[1]}')
    return info
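A tiny sketch (not part of the archive) of the duck typing used by isfuture() above; the FutureLike class is invented for illustration:

# Example: isfuture() accepts any object advertising the marker attribute.
from asyncio import isfuture

class FutureLike:
    _asyncio_future_blocking = False   # the advertised duck-type marker

print(isfuture(FutureLike()))  # True
print(isfuture(object()))      # False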
===== proactor_events.py =====

"""Event loop using a proactor and related classes.

A proactor is a "notify-on-completion" multiplexer.  Currently a
proactor is only implemented on Windows with IOCP.
"""

__all__ = 'BaseProactorEventLoop',

import io
import os
import socket
import warnings
import signal
import threading
import collections

from . import base_events
from . import constants
from . import futures
from . import exceptions
from . import protocols
from . import sslproto
from . import transports
from . import trsock
from .log import logger


def _set_socket_extra(transport, sock):
    transport._extra['socket'] = trsock.TransportSocket(sock)

    try:
        transport._extra['sockname'] = sock.getsockname()
    except socket.error:
        if transport._loop.get_debug():
            logger.warning(
                "getsockname() failed on %r", sock, exc_info=True)

    if 'peername' not in transport._extra:
        try:
            transport._extra['peername'] = sock.getpeername()
        except socket.error:
            # UDP sockets may not have a peer name
            transport._extra['peername'] = None


class _ProactorBasePipeTransport(transports._FlowControlMixin,
                                 transports.BaseTransport):
    """Base class for pipe and socket transports."""

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        super().__init__(extra, loop)
        self._set_extra(sock)
        self._sock = sock
        self.set_protocol(protocol)
        self._server = server
        self._buffer = None  # None or bytearray.
        self._read_fut = None
        self._write_fut = None
        self._pending_write = 0
        self._conn_lost = 0
        self._closing = False  # Set when close() called.
        self._eof_written = False
        if self._server is not None:
            self._server._attach()
        self._loop.call_soon(self._protocol.connection_made, self)
        if waiter is not None:
            # only wake up the waiter when connection_made() has been called
            self._loop.call_soon(futures._set_result_unless_cancelled,
                                 waiter, None)

    def __repr__(self):
        info = [self.__class__.__name__]
        if self._sock is None:
            info.append('closed')
        elif self._closing:
            info.append('closing')
        if self._sock is not None:
            info.append(f'fd={self._sock.fileno()}')
        if self._read_fut is not None:
            info.append(f'read={self._read_fut!r}')
        if self._write_fut is not None:
            info.append(f'write={self._write_fut!r}')
        if self._buffer:
            info.append(f'write_bufsize={len(self._buffer)}')
        if self._eof_written:
            info.append('EOF written')
        return '<{}>'.format(' '.join(info))

    def _set_extra(self, sock):
        self._extra['pipe'] = sock

    def set_protocol(self, protocol):
        self._protocol = protocol

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._closing

    def close(self):
        if self._closing:
            return
        self._closing = True
        self._conn_lost += 1
        if not self._buffer and self._write_fut is None:
            self._loop.call_soon(self._call_connection_lost, None)
        if self._read_fut is not None:
            self._read_fut.cancel()
            self._read_fut = None

    def __del__(self, _warn=warnings.warn):
        if self._sock is not None:
            _warn(f"unclosed transport {self!r}", ResourceWarning,
                  source=self)
            self.close()

    def _fatal_error(self, exc, message='Fatal error on pipe transport'):
        try:
            if isinstance(exc, OSError):
                if self._loop.get_debug():
                    logger.debug("%r: %s", self, message, exc_info=True)
            else:
                self._loop.call_exception_handler({
                    'message': message,
                    'exception': exc,
                    'transport': self,
                    'protocol': self._protocol,
                })
        finally:
            self._force_close(exc)
    def _force_close(self, exc):
        if self._empty_waiter is not None and not self._empty_waiter.done():
            if exc is None:
                self._empty_waiter.set_result(None)
            else:
                self._empty_waiter.set_exception(exc)
        if self._closing:
            return
        self._closing = True
        self._conn_lost += 1
        if self._write_fut:
            self._write_fut.cancel()
            self._write_fut = None
        if self._read_fut:
            self._read_fut.cancel()
            self._read_fut = None
        self._pending_write = 0
        self._buffer = None
        self._loop.call_soon(self._call_connection_lost, exc)

    def _call_connection_lost(self, exc):
        try:
            self._protocol.connection_lost(exc)
        finally:
            # XXX If there is a pending overlapped read on the other
            # end then it may fail with ERROR_NETNAME_DELETED if we
            # just close our end.  First calling shutdown() seems to
            # cure it, but maybe using DisconnectEx() would be better.
            if hasattr(self._sock, 'shutdown'):
                self._sock.shutdown(socket.SHUT_RDWR)
            self._sock.close()
            self._sock = None
            server = self._server
            if server is not None:
                server._detach()
                self._server = None

    def get_write_buffer_size(self):
        size = self._pending_write
        if self._buffer is not None:
            size += len(self._buffer)
        return size


class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
                                 transports.ReadTransport):
    """Transport for read pipes."""

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        self._pending_data = None
        self._paused = True
        super().__init__(loop, sock, protocol, waiter, extra, server)

        self._loop.call_soon(self._loop_reading)
        self._paused = False

    def is_reading(self):
        return not self._paused and not self._closing

    def pause_reading(self):
        if self._closing or self._paused:
            return
        self._paused = True

        # bpo-33694: Don't cancel self._read_fut because cancelling an
        # overlapped WSASend() silently loses data with the current
        # proactor implementation.
        #
        # If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
        # completed (even if HasOverlappedIoCompleted() returns 0), but
        # Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
        # error.  Once the overlapped is ignored, the IOCP loop will ignore
        # the completion I/O event and so will not read the result of the
        # overlapped WSARecv().

        if self._loop.get_debug():
            logger.debug("%r pauses reading", self)

    def resume_reading(self):
        if self._closing or not self._paused:
            return

        self._paused = False
        if self._read_fut is None:
            self._loop.call_soon(self._loop_reading, None)

        data = self._pending_data
        self._pending_data = None
        if data is not None:
            # Call the protocol method after calling _loop_reading(),
            # since the protocol can decide to pause reading again.
            self._loop.call_soon(self._data_received, data)

        if self._loop.get_debug():
            logger.debug("%r resumes reading", self)

    def _eof_received(self):
        if self._loop.get_debug():
            logger.debug("%r received EOF", self)

        try:
            keep_open = self._protocol.eof_received()
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self._fatal_error(
                exc, 'Fatal error: protocol.eof_received() call failed.')
            return

        if not keep_open:
            self.close()
    def _data_received(self, data):
        if self._paused:
            # Don't call any protocol method while reading is paused.
            # The protocol will be called on resume_reading().
            assert self._pending_data is None
            self._pending_data = data
            return

        if not data:
            self._eof_received()
            return

        if isinstance(self._protocol, protocols.BufferedProtocol):
            try:
                protocols._feed_data_to_buffered_proto(self._protocol, data)
            except (SystemExit, KeyboardInterrupt):
                raise
            except BaseException as exc:
                self._fatal_error(exc,
                                  'Fatal error: protocol.buffer_updated() '
                                  'call failed.')
                return
        else:
            self._protocol.data_received(data)

    def _loop_reading(self, fut=None):
        data = None
        try:
            if fut is not None:
                assert self._read_fut is fut or (self._read_fut is None and
                                                 self._closing)
                self._read_fut = None
                if fut.done():
                    # deliver data later in "finally" clause
                    data = fut.result()
                else:
                    # the future will be replaced by next proactor.recv call
                    fut.cancel()

            if self._closing:
                # since close() has been called we ignore any read data
                data = None
                return

            if data == b'':
                # we got end-of-file so no need to reschedule a new read
                return

            # bpo-33694: buffer_updated() has currently no fast path because
            # of a data loss issue caused by overlapped WSASend()
            # cancellation.

            if not self._paused:
                # reschedule a new read
                self._read_fut = self._loop._proactor.recv(self._sock, 32768)
        except ConnectionAbortedError as exc:
            if not self._closing:
                self._fatal_error(exc, 'Fatal read error on pipe transport')
            elif self._loop.get_debug():
                logger.debug("Read error on pipe transport while closing",
                             exc_info=True)
        except ConnectionResetError as exc:
            self._force_close(exc)
        except OSError as exc:
            self._fatal_error(exc, 'Fatal read error on pipe transport')
        except exceptions.CancelledError:
            if not self._closing:
                raise
        else:
            if not self._paused:
                self._read_fut.add_done_callback(self._loop_reading)
        finally:
            if data is not None:
                self._data_received(data)


class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
                                      transports.WriteTransport):
    """Transport for write pipes."""

    _start_tls_compatible = True

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self._empty_waiter = None

    def write(self, data):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError(
                f"data argument must be a bytes-like object, "
                f"not {type(data).__name__}")
        if self._eof_written:
            raise RuntimeError('write_eof() already called')
        if self._empty_waiter is not None:
            raise RuntimeError('unable to write; sendfile is in progress')

        if not data:
            return

        if self._conn_lost:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.send() raised exception.')
            self._conn_lost += 1
            return

        # Observable states:
        # 1. IDLE: _write_fut and _buffer both None
        # 2. WRITING: _write_fut set; _buffer None
        # 3. BACKED UP: _write_fut set; _buffer a bytearray
        # We always copy the data, so the caller can't modify it
        # while we're still waiting for the I/O to happen.
        if self._write_fut is None:  # IDLE -> WRITING
            assert self._buffer is None
            # Pass a copy, except if it's already immutable.
            self._loop_writing(data=bytes(data))
        elif not self._buffer:  # WRITING -> BACKED UP
            # Make a mutable copy which we can extend.
            self._buffer = bytearray(data)
            self._maybe_pause_protocol()
        else:  # BACKED UP
            # Append to buffer (also copies).
            self._buffer.extend(data)
            self._maybe_pause_protocol()
    def _loop_writing(self, f=None, data=None):
        try:
            if f is not None and self._write_fut is None and self._closing:
                # XXX most likely self._force_close() has been called, and
                # it has set self._write_fut to None.
                return
            assert f is self._write_fut
            self._write_fut = None
            self._pending_write = 0
            if f:
                f.result()
            if data is None:
                data = self._buffer
                self._buffer = None
            if not data:
                if self._closing:
                    self._loop.call_soon(self._call_connection_lost, None)
                if self._eof_written:
                    self._sock.shutdown(socket.SHUT_WR)
                # Now that we've reduced the buffer size, tell the
                # protocol to resume writing if it was paused.  Note that
                # we do this last since the callback is called immediately
                # and it may add more data to the buffer (even causing the
                # protocol to be paused again).
                self._maybe_resume_protocol()
            else:
                self._write_fut = self._loop._proactor.send(self._sock, data)
                if not self._write_fut.done():
                    assert self._pending_write == 0
                    self._pending_write = len(data)
                    self._write_fut.add_done_callback(self._loop_writing)
                    self._maybe_pause_protocol()
                else:
                    self._write_fut.add_done_callback(self._loop_writing)
            if self._empty_waiter is not None and self._write_fut is None:
                self._empty_waiter.set_result(None)
        except ConnectionResetError as exc:
            self._force_close(exc)
        except OSError as exc:
            self._fatal_error(exc, 'Fatal write error on pipe transport')

    def can_write_eof(self):
        return True

    def write_eof(self):
        self.close()

    def abort(self):
        self._force_close(None)

    def _make_empty_waiter(self):
        if self._empty_waiter is not None:
            raise RuntimeError("Empty waiter is already set")
        self._empty_waiter = self._loop.create_future()
        if self._write_fut is None:
            self._empty_waiter.set_result(None)
        return self._empty_waiter

    def _reset_empty_waiter(self):
        self._empty_waiter = None


class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self._read_fut = self._loop._proactor.recv(self._sock, 16)
        self._read_fut.add_done_callback(self._pipe_closed)

    def _pipe_closed(self, fut):
        if fut.cancelled():
            # the transport has been closed
            return
        assert fut.result() == b''
        if self._closing:
            assert self._read_fut is None
            return
        assert fut is self._read_fut, (fut, self._read_fut)
        self._read_fut = None
        if self._write_fut is not None:
            self._force_close(BrokenPipeError())
        else:
            self.close()


class _ProactorDatagramTransport(_ProactorBasePipeTransport):
    max_size = 256 * 1024

    def __init__(self, loop, sock, protocol, address=None,
                 waiter=None, extra=None):
        self._address = address
        self._empty_waiter = None
        # We don't need to call _protocol.connection_made() since our base
        # constructor does it for us.
        super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)

        # The base constructor sets _buffer = None, so we set it here
        self._buffer = collections.deque()
        self._loop.call_soon(self._loop_reading)

    def _set_extra(self, sock):
        _set_socket_extra(self, sock)

    def get_write_buffer_size(self):
        return sum(len(data) for data, _ in self._buffer)

    def abort(self):
        self._force_close(None)

    def sendto(self, data, addr=None):
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be bytes-like object (%r)',
                            type(data))

        if not data:
            return

        if self._address is not None and addr not in (None, self._address):
            raise ValueError(
                f'Invalid address: must be None or {self._address}')

        if self._conn_lost and self._address:
            if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
                logger.warning('socket.sendto() raised exception.')
            self._conn_lost += 1
            return

        # Ensure that what we buffer is immutable.
        self._buffer.append((bytes(data), addr))

        if self._write_fut is None:
            # No current write operations are active, kick one off
            self._loop_writing()
        # else: A write operation is already kicked off

        self._maybe_pause_protocol()
    def _loop_writing(self, fut=None):
        try:
            if self._conn_lost:
                return

            assert fut is self._write_fut
            self._write_fut = None
            if fut:
                # We are in a _loop_writing() done callback, get the result
                fut.result()

            if not self._buffer or (self._conn_lost and self._address):
                # The connection has been closed
                if self._closing:
                    self._loop.call_soon(self._call_connection_lost, None)
                return

            data, addr = self._buffer.popleft()
            if self._address is not None:
                self._write_fut = self._loop._proactor.send(self._sock,
                                                            data)
            else:
                self._write_fut = self._loop._proactor.sendto(self._sock,
                                                              data,
                                                              addr=addr)
        except OSError as exc:
            self._protocol.error_received(exc)
        except Exception as exc:
            self._fatal_error(exc, 'Fatal write error on datagram transport')
        else:
            self._write_fut.add_done_callback(self._loop_writing)
            self._maybe_resume_protocol()

    def _loop_reading(self, fut=None):
        data = None
        try:
            if self._conn_lost:
                return

            assert self._read_fut is fut or (self._read_fut is None and
                                             self._closing)

            self._read_fut = None
            if fut is not None:
                res = fut.result()

                if self._closing:
                    # since close() has been called we ignore any read data
                    data = None
                    return

                if self._address is not None:
                    data, addr = res, self._address
                else:
                    data, addr = res

            if self._conn_lost:
                return

            if self._address is not None:
                self._read_fut = self._loop._proactor.recv(
                    self._sock, self.max_size)
            else:
                self._read_fut = self._loop._proactor.recvfrom(
                    self._sock, self.max_size)
        except OSError as exc:
            self._protocol.error_received(exc)
        except exceptions.CancelledError:
            if not self._closing:
                raise
        else:
            if self._read_fut is not None:
                self._read_fut.add_done_callback(self._loop_reading)
        finally:
            if data:
                self._protocol.datagram_received(data, addr)


class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
                                   _ProactorBaseWritePipeTransport,
                                   transports.Transport):
    """Transport for duplex pipes."""

    def can_write_eof(self):
        return False

    def write_eof(self):
        raise NotImplementedError


class _ProactorSocketTransport(_ProactorReadPipeTransport,
                               _ProactorBaseWritePipeTransport,
                               transports.Transport):
    """Transport for connected sockets."""

    _sendfile_compatible = constants._SendfileMode.TRY_NATIVE

    def __init__(self, loop, sock, protocol, waiter=None,
                 extra=None, server=None):
        super().__init__(loop, sock, protocol, waiter, extra, server)
        base_events._set_nodelay(sock)

    def _set_extra(self, sock):
        _set_socket_extra(self, sock)

    def can_write_eof(self):
        return True

    def write_eof(self):
        if self._closing or self._eof_written:
            return
        self._eof_written = True
        if self._write_fut is None:
            self._sock.shutdown(socket.SHUT_WR)


class BaseProactorEventLoop(base_events.BaseEventLoop):

    def __init__(self, proactor):
        super().__init__()
        logger.debug('Using proactor: %s', proactor.__class__.__name__)
        self._proactor = proactor
        self._selector = proactor  # convenient alias
        self._self_reading_future = None
        self._accept_futures = {}  # socket file descriptor => Future
        proactor.set_loop(self)
        self._make_self_pipe()
        if threading.current_thread() is threading.main_thread():
            # wakeup fd can only be installed to a file descriptor from
            # the main thread
            signal.set_wakeup_fd(self._csock.fileno())

    def _make_socket_transport(self, sock, protocol, waiter=None,
                               extra=None, server=None):
        return _ProactorSocketTransport(self, sock, protocol, waiter,
                                        extra, server)
    def _make_ssl_transport(
            self, rawsock, protocol, sslcontext, waiter=None,
            *, server_side=False, server_hostname=None,
            extra=None, server=None,
            ssl_handshake_timeout=None):
        ssl_protocol = sslproto.SSLProtocol(
                self, protocol, sslcontext, waiter,
                server_side, server_hostname,
                ssl_handshake_timeout=ssl_handshake_timeout)
        _ProactorSocketTransport(self, rawsock, ssl_protocol,
                                 extra=extra, server=server)
        return ssl_protocol._app_transport

    def _make_datagram_transport(self, sock, protocol,
                                 address=None, waiter=None, extra=None):
        return _ProactorDatagramTransport(self, sock, protocol, address,
                                          waiter, extra)

    def _make_duplex_pipe_transport(self, sock, protocol,
                                    waiter=None, extra=None):
        return _ProactorDuplexPipeTransport(self, sock, protocol,
                                            waiter, extra)

    def _make_read_pipe_transport(self, sock, protocol,
                                  waiter=None, extra=None):
        return _ProactorReadPipeTransport(self, sock, protocol,
                                          waiter, extra)

    def _make_write_pipe_transport(self, sock, protocol,
                                   waiter=None, extra=None):
        # We want connection_lost() to be called when other end closes
        return _ProactorWritePipeTransport(self, sock, protocol,
                                           waiter, extra)

    def close(self):
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self.is_closed():
            return

        if threading.current_thread() is threading.main_thread():
            signal.set_wakeup_fd(-1)

        # Call these methods before closing the event loop (before calling
        # BaseEventLoop.close), because they can schedule callbacks with
        # call_soon(), which is forbidden when the event loop is closed.
        self._stop_accept_futures()
        self._close_self_pipe()
        self._proactor.close()
        self._proactor = None
        self._selector = None

        # Close the event loop
        super().close()

    async def sock_recv(self, sock, n):
        return await self._proactor.recv(sock, n)

    async def sock_recv_into(self, sock, buf):
        return await self._proactor.recv_into(sock, buf)

    async def sock_sendall(self, sock, data):
        return await self._proactor.send(sock, data)

    async def sock_connect(self, sock, address):
        return await self._proactor.connect(sock, address)

    async def sock_accept(self, sock):
        return await self._proactor.accept(sock)

    async def _sock_sendfile_native(self, sock, file, offset, count):
        try:
            fileno = file.fileno()
        except (AttributeError, io.UnsupportedOperation) as err:
            raise exceptions.SendfileNotAvailableError("not a regular file")
        try:
            fsize = os.fstat(fileno).st_size
        except OSError as err:
            raise exceptions.SendfileNotAvailableError("not a regular file")
        blocksize = count if count else fsize
        if not blocksize:
            return 0  # empty file

        blocksize = min(blocksize, 0xffff_ffff)
        end_pos = min(offset + count, fsize) if count else fsize
        offset = min(offset, fsize)
        total_sent = 0
        try:
            while True:
                blocksize = min(end_pos - offset, blocksize)
                if blocksize <= 0:
                    return total_sent
                await self._proactor.sendfile(sock, file, offset, blocksize)
                offset += blocksize
                total_sent += blocksize
        finally:
            if total_sent > 0:
                file.seek(offset)

    async def _sendfile_native(self, transp, file, offset, count):
        resume_reading = transp.is_reading()
        transp.pause_reading()
        await transp._make_empty_waiter()
        try:
            return await self.sock_sendfile(transp._sock, file, offset, count,
                                            fallback=False)
        finally:
            transp._reset_empty_waiter()
            if resume_reading:
                transp.resume_reading()

    def _close_self_pipe(self):
        if self._self_reading_future is not None:
            self._self_reading_future.cancel()
            self._self_reading_future = None
        self._ssock.close()
        self._ssock = None
        self._csock.close()
        self._csock = None
        self._internal_fds -= 1
    def _make_self_pipe(self):
        # A self-socket, really. :-)
        self._ssock, self._csock = socket.socketpair()
        self._ssock.setblocking(False)
        self._csock.setblocking(False)
        self._internal_fds += 1

    def _loop_self_reading(self, f=None):
        try:
            if f is not None:
                f.result()  # may raise
            if self._self_reading_future is not f:
                # When we scheduled this Future, we assigned it to
                # _self_reading_future. If it's not there now, something
                # has tried to cancel the loop while this callback was
                # still in the queue (see
                # windows_events.ProactorEventLoop.run_forever). In that
                # case stop here instead of continuing to schedule a new
                # iteration.
                return
            f = self._proactor.recv(self._ssock, 4096)
        except exceptions.CancelledError:
            # _close_self_pipe() has been called, stop waiting for data
            return
        except (SystemExit, KeyboardInterrupt):
            raise
        except BaseException as exc:
            self.call_exception_handler({
                'message': 'Error on reading from the event loop self pipe',
                'exception': exc,
                'loop': self,
            })
        else:
            self._self_reading_future = f
            f.add_done_callback(self._loop_self_reading)

    def _write_to_self(self):
        # This may be called from a different thread, possibly after
        # _close_self_pipe() has been called or even while it is
        # running.  Guard for self._csock being None or closed.  When
        # a socket is closed, send() raises OSError (with errno set to
        # EBADF, but let's not rely on the exact error code).
        csock = self._csock
        if csock is None:
            return

        try:
            csock.send(b'\0')
        except OSError:
            if self._debug:
                logger.debug("Fail to write a null byte into the "
                             "self-pipe socket",
                             exc_info=True)

    def _start_serving(self, protocol_factory, sock,
                       sslcontext=None, server=None, backlog=100,
                       ssl_handshake_timeout=None):

        def loop(f=None):
            try:
                if f is not None:
                    conn, addr = f.result()
                    if self._debug:
                        logger.debug("%r got a new connection from %r: %r",
                                     server, addr, conn)
                    protocol = protocol_factory()
                    if sslcontext is not None:
                        self._make_ssl_transport(
                            conn, protocol, sslcontext, server_side=True,
                            extra={'peername': addr}, server=server,
                            ssl_handshake_timeout=ssl_handshake_timeout)
                    else:
                        self._make_socket_transport(
                            conn, protocol,
                            extra={'peername': addr}, server=server)

                if self.is_closed():
                    return
                f = self._proactor.accept(sock)
            except OSError as exc:
                if sock.fileno() != -1:
                    self.call_exception_handler({
                        'message': 'Accept failed on a socket',
                        'exception': exc,
                        'socket': trsock.TransportSocket(sock),
                    })
                    sock.close()
                elif self._debug:
                    logger.debug("Accept failed on socket %r",
                                 sock, exc_info=True)
            except exceptions.CancelledError:
                sock.close()
            else:
                self._accept_futures[sock.fileno()] = f
                f.add_done_callback(loop)

        self.call_soon(loop)

    def _process_events(self, event_list):
        # Events are processed in the IocpProactor._poll() method
        pass

    def _stop_accept_futures(self):
        for future in self._accept_futures.values():
            future.cancel()
        self._accept_futures.clear()

    def _stop_serving(self, sock):
        future = self._accept_futures.pop(sock.fileno(), None)
        if future:
            future.cancel()
        self._proactor._stop_serving(sock)
        sock.close()
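A brief sketch (not part of the archive) of selecting the proactor-based loop described above; it assumes a Windows host, so the call is guarded by platform:

# Example: the proactor event loop is Windows-only (IOCP).
import asyncio
import sys

if sys.platform == 'win32':
    loop = asyncio.ProactorEventLoop()   # a BaseProactorEventLoop subclass
    asyncio.set_event_loop(loop)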
===== base_tasks.py =====

import linecache
import traceback

from . import base_futures
from . import coroutines


def _task_repr_info(task):
    info = base_futures._future_repr_info(task)

    if task._must_cancel:
        # replace status
        info[0] = 'cancelling'

    info.insert(1, 'name=%r' % task.get_name())

    coro = coroutines._format_coroutine(task._coro)
    info.insert(2, f'coro=<{coro}>')

    if task._fut_waiter is not None:
        info.insert(3, f'wait_for={task._fut_waiter!r}')
    return info


def _task_get_stack(task, limit):
    frames = []
    if hasattr(task._coro, 'cr_frame'):
        # case 1: 'async def' coroutines
        f = task._coro.cr_frame
    elif hasattr(task._coro, 'gi_frame'):
        # case 2: legacy coroutines
        f = task._coro.gi_frame
    elif hasattr(task._coro, 'ag_frame'):
        # case 3: async generators
        f = task._coro.ag_frame
    else:
        # case 4: unknown objects
        f = None
    if f is not None:
        while f is not None:
            if limit is not None:
                if limit <= 0:
                    break
                limit -= 1
            frames.append(f)
            f = f.f_back
        frames.reverse()
    elif task._exception is not None:
        tb = task._exception.__traceback__
        while tb is not None:
            if limit is not None:
                if limit <= 0:
                    break
                limit -= 1
            frames.append(tb.tb_frame)
            tb = tb.tb_next
    return frames


def _task_print_stack(task, limit, file):
    extracted_list = []
    checked = set()
    for f in task.get_stack(limit=limit):
        lineno = f.f_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        if filename not in checked:
            checked.add(filename)
            linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        extracted_list.append((filename, lineno, name, line))

    exc = task._exception
    if not extracted_list:
        print(f'No stack for {task!r}', file=file)
    elif exc is not None:
        print(f'Traceback for {task!r} (most recent call last):',
              file=file)
    else:
        print(f'Stack for {task!r} (most recent call last):',
              file=file)

    traceback.print_list(extracted_list, file=file)
    if exc is not None:
        for line in traceback.format_exception_only(exc.__class__, exc):
            print(line, file=file, end='')
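A small sketch (not part of the archive) of the public Task methods backed by these helpers; the worker coroutine is invented for illustration:

# Example: Task.print_stack() uses _task_print_stack() above.
import asyncio
import sys

async def worker():
    await asyncio.sleep(3600)

async def main():
    task = asyncio.create_task(worker())
    await asyncio.sleep(0)               # let the task start and suspend
    task.print_stack(file=sys.stdout)    # shows the frame inside worker()
    task.cancel()

asyncio.run(main())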
===== trsock.py =====

import socket
import warnings


class TransportSocket:

    """A socket-like wrapper for exposing real transport sockets.

    These objects can be safely returned by APIs like
    `transport.get_extra_info('socket')`.  All potentially disruptive
    operations (like "socket.close()") are banned.
    """

    __slots__ = ('_sock',)

    def __init__(self, sock: socket.socket):
        self._sock = sock

    def _na(self, what):
        warnings.warn(
            f"Using {what} on sockets returned from get_extra_info('socket') "
            f"will be prohibited in asyncio 3.9. Please report your use case "
            f"to bugs.python.org.",
            DeprecationWarning, source=self)

    @property
    def family(self):
        return self._sock.family

    @property
    def type(self):
        return self._sock.type

    @property
    def proto(self):
        return self._sock.proto

    def __repr__(self):
        s = (
            f"<asyncio.TransportSocket fd={self.fileno()}, "
            f"family={self.family!s}, type={self.type!s}, "
            f"proto={self.proto}"
        )

        if self.fileno() != -1:
            try:
                laddr = self.getsockname()
                if laddr:
                    s = f"{s}, laddr={laddr}"
            except socket.error:
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    s = f"{s}, raddr={raddr}"
            except socket.error:
                pass

        return f"{s}>"

    def __getstate__(self):
        raise TypeError("Cannot serialize asyncio.TransportSocket object")

    def fileno(self):
        return self._sock.fileno()

    def dup(self):
        return self._sock.dup()

    def get_inheritable(self):
        return self._sock.get_inheritable()

    def shutdown(self, how):
        # asyncio doesn't currently provide a high-level transport API
        # to shutdown the connection.
        self._sock.shutdown(how)

    def getsockopt(self, *args, **kwargs):
        return self._sock.getsockopt(*args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        self._sock.setsockopt(*args, **kwargs)

    def getpeername(self):
        return self._sock.getpeername()

    def getsockname(self):
        return self._sock.getsockname()

    def getsockbyname(self):
        return self._sock.getsockbyname()

    def accept(self):
        self._na('accept() method')
        return self._sock.accept()

    def connect(self, *args, **kwargs):
        self._na('connect() method')
        return self._sock.connect(*args, **kwargs)

    def connect_ex(self, *args, **kwargs):
        self._na('connect_ex() method')
        return self._sock.connect_ex(*args, **kwargs)

    def bind(self, *args, **kwargs):
        self._na('bind() method')
        return self._sock.bind(*args, **kwargs)

    def ioctl(self, *args, **kwargs):
        self._na('ioctl() method')
        return self._sock.ioctl(*args, **kwargs)

    def listen(self, *args, **kwargs):
        self._na('listen() method')
        return self._sock.listen(*args, **kwargs)

    def makefile(self):
        self._na('makefile() method')
        return self._sock.makefile()

    def sendfile(self, *args, **kwargs):
        self._na('sendfile() method')
        return self._sock.sendfile(*args, **kwargs)

    def close(self):
        self._na('close() method')
        return self._sock.close()

    def detach(self):
        self._na('detach() method')
        return self._sock.detach()

    def sendmsg_afalg(self, *args, **kwargs):
        self._na('sendmsg_afalg() method')
        return self._sock.sendmsg_afalg(*args, **kwargs)

    def sendmsg(self, *args, **kwargs):
        self._na('sendmsg() method')
        return self._sock.sendmsg(*args, **kwargs)

    def sendto(self, *args, **kwargs):
        self._na('sendto() method')
        return self._sock.sendto(*args, **kwargs)

    def send(self, *args, **kwargs):
        self._na('send() method')
        return self._sock.send(*args, **kwargs)

    def sendall(self, *args, **kwargs):
        self._na('sendall() method')
        return self._sock.sendall(*args, **kwargs)

    def set_inheritable(self, *args, **kwargs):
        self._na('set_inheritable() method')
        return self._sock.set_inheritable(*args, **kwargs)

    def share(self, process_id):
        self._na('share() method')
        return self._sock.share(process_id)

    def recv_into(self, *args, **kwargs):
        self._na('recv_into() method')
        return self._sock.recv_into(*args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        self._na('recvfrom_into() method')
        return self._sock.recvfrom_into(*args, **kwargs)

    def recvmsg_into(self, *args, **kwargs):
        self._na('recvmsg_into() method')
        return self._sock.recvmsg_into(*args, **kwargs)

    def recvmsg(self, *args, **kwargs):
        self._na('recvmsg() method')
        return self._sock.recvmsg(*args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        self._na('recvfrom() method')
        return self._sock.recvfrom(*args, **kwargs)

    def recv(self, *args, **kwargs):
        self._na('recv() method')
        return self._sock.recv(*args, **kwargs)

    def settimeout(self, value):
        if value == 0:
            return
        raise ValueError(
            'settimeout(): only 0 timeout is allowed on transport sockets')

    def gettimeout(self):
        return 0

    def setblocking(self, flag):
        if not flag:
            return
        raise ValueError(
            'setblocking(): transport sockets cannot be blocking')

    def __enter__(self):
        self._na('context manager protocol')
        return self._sock.__enter__()

    def __exit__(self, *err):
        self._na('context manager protocol')
        return self._sock.__exit__(*err)
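A small sketch (not part of the archive) of the wrapper in use; the on_connection_made name is invented for illustration:

# Example: inspecting the TransportSocket from get_extra_info('socket').
import socket

def on_connection_made(transport):
    sock = transport.get_extra_info('socket')   # a TransportSocket
    # Reading options passes straight through without a warning:
    nodelay = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
    # By contrast, sock.close() would emit a DeprecationWarning and
    # close the socket out from under the transport -- don't do that.
    return nodelay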
'PIPE', 'PipeHandle' # Constants/globals BUFSIZE = 8192 PIPE = subprocess.PIPE STDOUT = subprocess.STDOUT _mmap_counter = itertools.count() # Replacement for os.pipe() using handles instead of fds def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE): """Like os.pipe() but with overlapped support and using handles not fds.""" address = tempfile.mktemp( prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format( os.getpid(), next(_mmap_counter))) if duplex: openmode = _winapi.PIPE_ACCESS_DUPLEX access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE obsize, ibsize = bufsize, bufsize else: openmode = _winapi.PIPE_ACCESS_INBOUND access = _winapi.GENERIC_WRITE obsize, ibsize = 0, bufsize openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE if overlapped[0]: openmode |= _winapi.FILE_FLAG_OVERLAPPED if overlapped[1]: flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED else: flags_and_attribs = 0 h1 = h2 = None try: h1 = _winapi.CreateNamedPipe( address, openmode, _winapi.PIPE_WAIT, 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) h2 = _winapi.CreateFile( address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, flags_and_attribs, _winapi.NULL) ov = _winapi.ConnectNamedPipe(h1, overlapped=True) ov.GetOverlappedResult(True) return h1, h2 except: if h1 is not None: _winapi.CloseHandle(h1) if h2 is not None: _winapi.CloseHandle(h2) raise # Wrapper for a pipe handle class PipeHandle: """Wrapper for an overlapped pipe handle which is vaguely file-object like. The IOCP event loop can use these instead of socket objects. """ def __init__(self, handle): self._handle = handle def __repr__(self): if self._handle is not None: handle = f'handle={self._handle!r}' else: handle = 'closed' return f'<{self.__class__.__name__} {handle}>' @property def handle(self): return self._handle def fileno(self): if self._handle is None: raise ValueError("I/O operation on closed pipe") return self._handle def close(self, *, CloseHandle=_winapi.CloseHandle): if self._handle is not None: CloseHandle(self._handle) self._handle = None def __del__(self, _warn=warnings.warn): if self._handle is not None: _warn(f"unclosed {self!r}", ResourceWarning, source=self) self.close() def __enter__(self): return self def __exit__(self, t, v, tb): self.close() # Replacement for subprocess.Popen using overlapped pipe handles class Popen(subprocess.Popen): """Replacement for subprocess.Popen using overlapped pipe handles. The stdin, stdout, stderr are None or instances of PipeHandle. 
""" def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds): assert not kwds.get('universal_newlines') assert kwds.get('bufsize', 0) == 0 stdin_rfd = stdout_wfd = stderr_wfd = None stdin_wh = stdout_rh = stderr_rh = None if stdin == PIPE: stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True) stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY) else: stdin_rfd = stdin if stdout == PIPE: stdout_rh, stdout_wh = pipe(overlapped=(True, False)) stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0) else: stdout_wfd = stdout if stderr == PIPE: stderr_rh, stderr_wh = pipe(overlapped=(True, False)) stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0) elif stderr == STDOUT: stderr_wfd = stdout_wfd else: stderr_wfd = stderr try: super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd, stderr=stderr_wfd, **kwds) except: for h in (stdin_wh, stdout_rh, stderr_rh): if h is not None: _winapi.CloseHandle(h) raise else: if stdin_wh is not None: self.stdin = PipeHandle(stdin_wh) if stdout_rh is not None: self.stdout = PipeHandle(stdout_rh) if stderr_rh is not None: self.stderr = PipeHandle(stderr_rh) finally: if stdin == PIPE: os.close(stdin_rfd) if stdout == PIPE: os.close(stdout_wfd) if stderr == PIPE: os.close(stderr_wfd) subprocess.py 0000644 00000017604 15030732674 0007325 0 ustar 00 __all__ = 'create_subprocess_exec', 'create_subprocess_shell' import subprocess import warnings from . import events from . import protocols from . import streams from . import tasks from .log import logger PIPE = subprocess.PIPE STDOUT = subprocess.STDOUT DEVNULL = subprocess.DEVNULL class SubprocessStreamProtocol(streams.FlowControlMixin, protocols.SubprocessProtocol): """Like StreamReaderProtocol, but for a subprocess.""" def __init__(self, limit, loop): super().__init__(loop=loop) self._limit = limit self.stdin = self.stdout = self.stderr = None self._transport = None self._process_exited = False self._pipe_fds = [] self._stdin_closed = self._loop.create_future() def __repr__(self): info = [self.__class__.__name__] if self.stdin is not None: info.append(f'stdin={self.stdin!r}') if self.stdout is not None: info.append(f'stdout={self.stdout!r}') if self.stderr is not None: info.append(f'stderr={self.stderr!r}') return '<{}>'.format(' '.join(info)) def connection_made(self, transport): self._transport = transport stdout_transport = transport.get_pipe_transport(1) if stdout_transport is not None: self.stdout = streams.StreamReader(limit=self._limit, loop=self._loop) self.stdout.set_transport(stdout_transport) self._pipe_fds.append(1) stderr_transport = transport.get_pipe_transport(2) if stderr_transport is not None: self.stderr = streams.StreamReader(limit=self._limit, loop=self._loop) self.stderr.set_transport(stderr_transport) self._pipe_fds.append(2) stdin_transport = transport.get_pipe_transport(0) if stdin_transport is not None: self.stdin = streams.StreamWriter(stdin_transport, protocol=self, reader=None, loop=self._loop) def pipe_data_received(self, fd, data): if fd == 1: reader = self.stdout elif fd == 2: reader = self.stderr else: reader = None if reader is not None: reader.feed_data(data) def pipe_connection_lost(self, fd, exc): if fd == 0: pipe = self.stdin if pipe is not None: pipe.close() self.connection_lost(exc) if exc is None: self._stdin_closed.set_result(None) else: self._stdin_closed.set_exception(exc) return if fd == 1: reader = self.stdout elif fd == 2: reader = self.stderr else: reader = None if reader is not None: if exc is None: reader.feed_eof() else: reader.set_exception(exc) 
if fd in self._pipe_fds: self._pipe_fds.remove(fd) self._maybe_close_transport() def process_exited(self): self._process_exited = True self._maybe_close_transport() def _maybe_close_transport(self): if len(self._pipe_fds) == 0 and self._process_exited: self._transport.close() self._transport = None def _get_close_waiter(self, stream): if stream is self.stdin: return self._stdin_closed class Process: def __init__(self, transport, protocol, loop): self._transport = transport self._protocol = protocol self._loop = loop self.stdin = protocol.stdin self.stdout = protocol.stdout self.stderr = protocol.stderr self.pid = transport.get_pid() def __repr__(self): return f'<{self.__class__.__name__} {self.pid}>' @property def returncode(self): return self._transport.get_returncode() async def wait(self): """Wait until the process exit and return the process return code.""" return await self._transport._wait() def send_signal(self, signal): self._transport.send_signal(signal) def terminate(self): self._transport.terminate() def kill(self): self._transport.kill() async def _feed_stdin(self, input): debug = self._loop.get_debug() self.stdin.write(input) if debug: logger.debug( '%r communicate: feed stdin (%s bytes)', self, len(input)) try: await self.stdin.drain() except (BrokenPipeError, ConnectionResetError) as exc: # communicate() ignores BrokenPipeError and ConnectionResetError if debug: logger.debug('%r communicate: stdin got %r', self, exc) if debug: logger.debug('%r communicate: close stdin', self) self.stdin.close() async def _noop(self): return None async def _read_stream(self, fd): transport = self._transport.get_pipe_transport(fd) if fd == 2: stream = self.stderr else: assert fd == 1 stream = self.stdout if self._loop.get_debug(): name = 'stdout' if fd == 1 else 'stderr' logger.debug('%r communicate: read %s', self, name) output = await stream.read() if self._loop.get_debug(): name = 'stdout' if fd == 1 else 'stderr' logger.debug('%r communicate: close %s', self, name) transport.close() return output async def communicate(self, input=None): if input is not None: stdin = self._feed_stdin(input) else: stdin = self._noop() if self.stdout is not None: stdout = self._read_stream(1) else: stdout = self._noop() if self.stderr is not None: stderr = self._read_stream(2) else: stderr = self._noop() stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr, loop=self._loop) await self.wait() return (stdout, stderr) async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None, loop=None, limit=streams._DEFAULT_LIMIT, **kwds): if loop is None: loop = events.get_event_loop() else: warnings.warn("The loop argument is deprecated since Python 3.8 " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2 ) protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, loop=loop) transport, protocol = await loop.subprocess_shell( protocol_factory, cmd, stdin=stdin, stdout=stdout, stderr=stderr, **kwds) return Process(transport, protocol, loop) async def create_subprocess_exec(program, *args, stdin=None, stdout=None, stderr=None, loop=None, limit=streams._DEFAULT_LIMIT, **kwds): if loop is None: loop = events.get_event_loop() else: warnings.warn("The loop argument is deprecated since Python 3.8 " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2 ) protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, loop=loop) transport, protocol = await loop.subprocess_exec( protocol_factory, program, *args, stdin=stdin, stdout=stdout, 
stderr=stderr, **kwds) return Process(transport, protocol, loop) log.py 0000644 00000000174 15030732674 0005710 0 ustar 00 """Logging configuration.""" import logging # Name the logger after the package. logger = logging.getLogger(__package__) windows_events.py 0000644 00000100151 15030732674 0010201 0 ustar 00 """Selector and proactor event loops for Windows.""" import _overlapped import _winapi import errno import math import msvcrt import socket import struct import time import weakref from . import events from . import base_subprocess from . import futures from . import exceptions from . import proactor_events from . import selector_events from . import tasks from . import windows_utils from .log import logger __all__ = ( 'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor', 'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy', 'WindowsProactorEventLoopPolicy', ) NULL = 0 INFINITE = 0xffffffff ERROR_CONNECTION_REFUSED = 1225 ERROR_CONNECTION_ABORTED = 1236 # Initial delay in seconds for connect_pipe() before retrying to connect CONNECT_PIPE_INIT_DELAY = 0.001 # Maximum delay in seconds for connect_pipe() before retrying to connect CONNECT_PIPE_MAX_DELAY = 0.100 class _OverlappedFuture(futures.Future): """Subclass of Future which represents an overlapped operation. Cancelling it will immediately cancel the overlapped operation. """ def __init__(self, ov, *, loop=None): super().__init__(loop=loop) if self._source_traceback: del self._source_traceback[-1] self._ov = ov def _repr_info(self): info = super()._repr_info() if self._ov is not None: state = 'pending' if self._ov.pending else 'completed' info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>') return info def _cancel_overlapped(self): if self._ov is None: return try: self._ov.cancel() except OSError as exc: context = { 'message': 'Cancelling an overlapped future failed', 'exception': exc, 'future': self, } if self._source_traceback: context['source_traceback'] = self._source_traceback self._loop.call_exception_handler(context) self._ov = None def cancel(self): self._cancel_overlapped() return super().cancel() def set_exception(self, exception): super().set_exception(exception) self._cancel_overlapped() def set_result(self, result): super().set_result(result) self._ov = None class _BaseWaitHandleFuture(futures.Future): """Subclass of Future which represents a wait handle.""" def __init__(self, ov, handle, wait_handle, *, loop=None): super().__init__(loop=loop) if self._source_traceback: del self._source_traceback[-1] # Keep a reference to the Overlapped object to keep it alive until the # wait is unregistered self._ov = ov self._handle = handle self._wait_handle = wait_handle # Should we call UnregisterWaitEx() if the wait completes # or is cancelled? 
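# Illustrative sketch (not part of the original source): this module provides
# both a selector-based and an IOCP-based (proactor) event loop, chosen via
# the policies exported in __all__ above. A minimal, Windows-only way to
# switch policies explicitly; since Python 3.8 the proactor policy is the
# default on Windows.
import asyncio
import sys

if sys.platform == 'win32':
    # The selector loop remains useful for APIs the proactor does not
    # implement for arbitrary file descriptors (e.g. add_reader()).
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

asyncio.run(asyncio.sleep(0))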
self._registered = True def _poll(self): # non-blocking wait: use a timeout of 0 millisecond return (_winapi.WaitForSingleObject(self._handle, 0) == _winapi.WAIT_OBJECT_0) def _repr_info(self): info = super()._repr_info() info.append(f'handle={self._handle:#x}') if self._handle is not None: state = 'signaled' if self._poll() else 'waiting' info.append(state) if self._wait_handle is not None: info.append(f'wait_handle={self._wait_handle:#x}') return info def _unregister_wait_cb(self, fut): # The wait was unregistered: it's not safe to destroy the Overlapped # object self._ov = None def _unregister_wait(self): if not self._registered: return self._registered = False wait_handle = self._wait_handle self._wait_handle = None try: _overlapped.UnregisterWait(wait_handle) except OSError as exc: if exc.winerror != _overlapped.ERROR_IO_PENDING: context = { 'message': 'Failed to unregister the wait handle', 'exception': exc, 'future': self, } if self._source_traceback: context['source_traceback'] = self._source_traceback self._loop.call_exception_handler(context) return # ERROR_IO_PENDING means that the unregister is pending self._unregister_wait_cb(None) def cancel(self): self._unregister_wait() return super().cancel() def set_exception(self, exception): self._unregister_wait() super().set_exception(exception) def set_result(self, result): self._unregister_wait() super().set_result(result) class _WaitCancelFuture(_BaseWaitHandleFuture): """Subclass of Future which represents a wait for the cancellation of a _WaitHandleFuture using an event. """ def __init__(self, ov, event, wait_handle, *, loop=None): super().__init__(ov, event, wait_handle, loop=loop) self._done_callback = None def cancel(self): raise RuntimeError("_WaitCancelFuture must not be cancelled") def set_result(self, result): super().set_result(result) if self._done_callback is not None: self._done_callback(self) def set_exception(self, exception): super().set_exception(exception) if self._done_callback is not None: self._done_callback(self) class _WaitHandleFuture(_BaseWaitHandleFuture): def __init__(self, ov, handle, wait_handle, proactor, *, loop=None): super().__init__(ov, handle, wait_handle, loop=loop) self._proactor = proactor self._unregister_proactor = True self._event = _overlapped.CreateEvent(None, True, False, None) self._event_fut = None def _unregister_wait_cb(self, fut): if self._event is not None: _winapi.CloseHandle(self._event) self._event = None self._event_fut = None # If the wait was cancelled, the wait may never be signalled, so # it's required to unregister it. Otherwise, IocpProactor.close() will # wait forever for an event which will never come. # # If the IocpProactor already received the event, it's safe to call # _unregister() because we kept a reference to the Overlapped object # which is used as a unique key. 
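# Illustrative sketch (not part of the original source): the wait-handle
# futures above are ordinary asyncio Futures that additionally unregister a
# native wait in cancel()/set_result()/set_exception(). The base behaviour
# they extend, in miniature:
import asyncio

async def demo():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    # Resolve the future from the loop, much as the proactor does once the
    # underlying wait handle is signalled.
    loop.call_later(0.05, fut.set_result, 'signalled')
    print(await fut)

asyncio.run(demo())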
self._proactor._unregister(self._ov) self._proactor = None super()._unregister_wait_cb(fut) def _unregister_wait(self): if not self._registered: return self._registered = False wait_handle = self._wait_handle self._wait_handle = None try: _overlapped.UnregisterWaitEx(wait_handle, self._event) except OSError as exc: if exc.winerror != _overlapped.ERROR_IO_PENDING: context = { 'message': 'Failed to unregister the wait handle', 'exception': exc, 'future': self, } if self._source_traceback: context['source_traceback'] = self._source_traceback self._loop.call_exception_handler(context) return # ERROR_IO_PENDING is not an error, the wait was unregistered self._event_fut = self._proactor._wait_cancel(self._event, self._unregister_wait_cb) class PipeServer(object): """Class representing a pipe server. This is much like a bound, listening socket. """ def __init__(self, address): self._address = address self._free_instances = weakref.WeakSet() # initialize the pipe attribute before calling _server_pipe_handle() # because this function can raise an exception and the destructor calls # the close() method self._pipe = None self._accept_pipe_future = None self._pipe = self._server_pipe_handle(True) def _get_unconnected_pipe(self): # Create new instance and return previous one. This ensures # that (until the server is closed) there is always at least # one pipe handle for address. Therefore if a client attempt # to connect it will not fail with FileNotFoundError. tmp, self._pipe = self._pipe, self._server_pipe_handle(False) return tmp def _server_pipe_handle(self, first): # Return a wrapper for a new pipe handle. if self.closed(): return None flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED if first: flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE h = _winapi.CreateNamedPipe( self._address, flags, _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | _winapi.PIPE_WAIT, _winapi.PIPE_UNLIMITED_INSTANCES, windows_utils.BUFSIZE, windows_utils.BUFSIZE, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) pipe = windows_utils.PipeHandle(h) self._free_instances.add(pipe) return pipe def closed(self): return (self._address is None) def close(self): if self._accept_pipe_future is not None: self._accept_pipe_future.cancel() self._accept_pipe_future = None # Close all instances which have not been connected to by a client. if self._address is not None: for pipe in self._free_instances: pipe.close() self._pipe = None self._address = None self._free_instances.clear() __del__ = close class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop): """Windows version of selector event loop.""" class ProactorEventLoop(proactor_events.BaseProactorEventLoop): """Windows version of proactor event loop using IOCP.""" def __init__(self, proactor=None): if proactor is None: proactor = IocpProactor() super().__init__(proactor) def run_forever(self): try: assert self._self_reading_future is None self.call_soon(self._loop_self_reading) super().run_forever() finally: if self._self_reading_future is not None: ov = self._self_reading_future._ov self._self_reading_future.cancel() # self_reading_future was just cancelled so if it hasn't been # finished yet, it never will be (it's possible that it has # already finished and its callback is waiting in the queue, # where it could still happen if the event loop is restarted). 
# Unregister it otherwise IocpProactor.close will wait for it # forever if ov is not None: self._proactor._unregister(ov) self._self_reading_future = None async def create_pipe_connection(self, protocol_factory, address): f = self._proactor.connect_pipe(address) pipe = await f protocol = protocol_factory() trans = self._make_duplex_pipe_transport(pipe, protocol, extra={'addr': address}) return trans, protocol async def start_serving_pipe(self, protocol_factory, address): server = PipeServer(address) def loop_accept_pipe(f=None): pipe = None try: if f: pipe = f.result() server._free_instances.discard(pipe) if server.closed(): # A client connected before the server was closed: # drop the client (close the pipe) and exit pipe.close() return protocol = protocol_factory() self._make_duplex_pipe_transport( pipe, protocol, extra={'addr': address}) pipe = server._get_unconnected_pipe() if pipe is None: return f = self._proactor.accept_pipe(pipe) except OSError as exc: if pipe and pipe.fileno() != -1: self.call_exception_handler({ 'message': 'Pipe accept failed', 'exception': exc, 'pipe': pipe, }) pipe.close() elif self._debug: logger.warning("Accept pipe failed on pipe %r", pipe, exc_info=True) except exceptions.CancelledError: if pipe: pipe.close() else: server._accept_pipe_future = f f.add_done_callback(loop_accept_pipe) self.call_soon(loop_accept_pipe) return [server] async def _make_subprocess_transport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, extra=None, **kwargs): waiter = self.create_future() transp = _WindowsSubprocessTransport(self, protocol, args, shell, stdin, stdout, stderr, bufsize, waiter=waiter, extra=extra, **kwargs) try: await waiter except (SystemExit, KeyboardInterrupt): raise except BaseException: transp.close() await transp._wait() raise return transp class IocpProactor: """Proactor implementation using IOCP.""" def __init__(self, concurrency=0xffffffff): self._loop = None self._results = [] self._iocp = _overlapped.CreateIoCompletionPort( _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency) self._cache = {} self._registered = weakref.WeakSet() self._unregistered = [] self._stopped_serving = weakref.WeakSet() def _check_closed(self): if self._iocp is None: raise RuntimeError('IocpProactor is closed') def __repr__(self): info = ['overlapped#=%s' % len(self._cache), 'result#=%s' % len(self._results)] if self._iocp is None: info.append('closed') return '<%s %s>' % (self.__class__.__name__, " ".join(info)) def set_loop(self, loop): self._loop = loop def select(self, timeout=None): if not self._results: self._poll(timeout) tmp = self._results self._results = [] return tmp def _result(self, value): fut = self._loop.create_future() fut.set_result(value) return fut def recv(self, conn, nbytes, flags=0): self._register_with_iocp(conn) ov = _overlapped.Overlapped(NULL) try: if isinstance(conn, socket.socket): ov.WSARecv(conn.fileno(), nbytes, flags) else: ov.ReadFile(conn.fileno(), nbytes) except BrokenPipeError: return self._result(b'') def finish_recv(trans, key, ov): try: return ov.getresult() except OSError as exc: if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, _overlapped.ERROR_OPERATION_ABORTED): raise ConnectionResetError(*exc.args) else: raise return self._register(ov, conn, finish_recv) def recv_into(self, conn, buf, flags=0): self._register_with_iocp(conn) ov = _overlapped.Overlapped(NULL) try: if isinstance(conn, socket.socket): ov.WSARecvInto(conn.fileno(), buf, flags) else: ov.ReadFileInto(conn.fileno(), buf) except BrokenPipeError: return 
self._result(0) def finish_recv(trans, key, ov): try: return ov.getresult() except OSError as exc: if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, _overlapped.ERROR_OPERATION_ABORTED): raise ConnectionResetError(*exc.args) else: raise return self._register(ov, conn, finish_recv) def recvfrom(self, conn, nbytes, flags=0): self._register_with_iocp(conn) ov = _overlapped.Overlapped(NULL) try: ov.WSARecvFrom(conn.fileno(), nbytes, flags) except BrokenPipeError: return self._result((b'', None)) def finish_recv(trans, key, ov): try: return ov.getresult() except OSError as exc: if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, _overlapped.ERROR_OPERATION_ABORTED): raise ConnectionResetError(*exc.args) else: raise return self._register(ov, conn, finish_recv) def sendto(self, conn, buf, flags=0, addr=None): self._register_with_iocp(conn) ov = _overlapped.Overlapped(NULL) ov.WSASendTo(conn.fileno(), buf, flags, addr) def finish_send(trans, key, ov): try: return ov.getresult() except OSError as exc: if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, _overlapped.ERROR_OPERATION_ABORTED): raise ConnectionResetError(*exc.args) else: raise return self._register(ov, conn, finish_send) def send(self, conn, buf, flags=0): self._register_with_iocp(conn) ov = _overlapped.Overlapped(NULL) if isinstance(conn, socket.socket): ov.WSASend(conn.fileno(), buf, flags) else: ov.WriteFile(conn.fileno(), buf) def finish_send(trans, key, ov): try: return ov.getresult() except OSError as exc: if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, _overlapped.ERROR_OPERATION_ABORTED): raise ConnectionResetError(*exc.args) else: raise return self._register(ov, conn, finish_send) def accept(self, listener): self._register_with_iocp(listener) conn = self._get_accept_socket(listener.family) ov = _overlapped.Overlapped(NULL) ov.AcceptEx(listener.fileno(), conn.fileno()) def finish_accept(trans, key, ov): ov.getresult() # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work. buf = struct.pack('@P', listener.fileno()) conn.setsockopt(socket.SOL_SOCKET, _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf) conn.settimeout(listener.gettimeout()) return conn, conn.getpeername() async def accept_coro(future, conn): # Coroutine closing the accept socket if the future is cancelled try: await future except exceptions.CancelledError: conn.close() raise future = self._register(ov, listener, finish_accept) coro = accept_coro(future, conn) tasks.ensure_future(coro, loop=self._loop) return future def connect(self, conn, address): if conn.type == socket.SOCK_DGRAM: # WSAConnect will complete immediately for UDP sockets so we don't # need to register any IOCP operation _overlapped.WSAConnect(conn.fileno(), address) fut = self._loop.create_future() fut.set_result(None) return fut self._register_with_iocp(conn) # The socket needs to be locally bound before we call ConnectEx(). try: _overlapped.BindLocal(conn.fileno(), conn.family) except OSError as e: if e.winerror != errno.WSAEINVAL: raise # Probably already locally bound; check using getsockname(). if conn.getsockname()[1] == 0: raise ov = _overlapped.Overlapped(NULL) ov.ConnectEx(conn.fileno(), address) def finish_connect(trans, key, ov): ov.getresult() # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work. 
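# Illustrative sketch (not part of the original source): the recv()/send()/
# connect() methods above back the event loop's public low-level socket API.
# The same operations through that public surface; host, port and request
# bytes are assumptions for illustration only.
import asyncio
import socket

async def fetch(host='example.com', port=80):
    loop = asyncio.get_running_loop()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)  # the loop requires non-blocking sockets
    await loop.sock_connect(sock, (host, port))
    await loop.sock_sendall(
        sock, b'HEAD / HTTP/1.0\r\nHost: ' + host.encode() + b'\r\n\r\n')
    data = await loop.sock_recv(sock, 1024)
    sock.close()
    return data

# e.g.: print(asyncio.run(fetch()))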
conn.setsockopt(socket.SOL_SOCKET, _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0) return conn return self._register(ov, conn, finish_connect) def sendfile(self, sock, file, offset, count): self._register_with_iocp(sock) ov = _overlapped.Overlapped(NULL) offset_low = offset & 0xffff_ffff offset_high = (offset >> 32) & 0xffff_ffff ov.TransmitFile(sock.fileno(), msvcrt.get_osfhandle(file.fileno()), offset_low, offset_high, count, 0, 0) def finish_sendfile(trans, key, ov): try: return ov.getresult() except OSError as exc: if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, _overlapped.ERROR_OPERATION_ABORTED): raise ConnectionResetError(*exc.args) else: raise return self._register(ov, sock, finish_sendfile) def accept_pipe(self, pipe): self._register_with_iocp(pipe) ov = _overlapped.Overlapped(NULL) connected = ov.ConnectNamedPipe(pipe.fileno()) if connected: # ConnectNamePipe() failed with ERROR_PIPE_CONNECTED which means # that the pipe is connected. There is no need to wait for the # completion of the connection. return self._result(pipe) def finish_accept_pipe(trans, key, ov): ov.getresult() return pipe return self._register(ov, pipe, finish_accept_pipe) async def connect_pipe(self, address): delay = CONNECT_PIPE_INIT_DELAY while True: # Unfortunately there is no way to do an overlapped connect to # a pipe. Call CreateFile() in a loop until it doesn't fail with # ERROR_PIPE_BUSY. try: handle = _overlapped.ConnectPipe(address) break except OSError as exc: if exc.winerror != _overlapped.ERROR_PIPE_BUSY: raise # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY) await tasks.sleep(delay) return windows_utils.PipeHandle(handle) def wait_for_handle(self, handle, timeout=None): """Wait for a handle. Return a Future object. The result of the future is True if the wait completed, or False if the wait did not complete (on timeout). """ return self._wait_for_handle(handle, timeout, False) def _wait_cancel(self, event, done_callback): fut = self._wait_for_handle(event, None, True) # add_done_callback() cannot be used because the wait may only complete # in IocpProactor.close(), while the event loop is not running. fut._done_callback = done_callback return fut def _wait_for_handle(self, handle, timeout, _is_cancel): self._check_closed() if timeout is None: ms = _winapi.INFINITE else: # RegisterWaitForSingleObject() has a resolution of 1 millisecond, # round away from zero to wait *at least* timeout seconds. ms = math.ceil(timeout * 1e3) # We only create ov so we can use ov.address as a key for the cache. ov = _overlapped.Overlapped(NULL) wait_handle = _overlapped.RegisterWaitWithQueue( handle, self._iocp, ov.address, ms) if _is_cancel: f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop) else: f = _WaitHandleFuture(ov, handle, wait_handle, self, loop=self._loop) if f._source_traceback: del f._source_traceback[-1] def finish_wait_for_handle(trans, key, ov): # Note that this second wait means that we should only use # this with handles types where a successful wait has no # effect. So events or processes are all right, but locks # or semaphores are not. Also note if the handle is # signalled and then quickly reset, then we may return # False even though we have not timed out. return f._poll() self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle) return f def _register_with_iocp(self, obj): # To get notifications of finished ops on this objects sent to the # completion port, were must register the handle. 
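# Illustrative sketch (not part of the original source): connect_pipe() above
# retries CreateFile() with a doubling delay capped at CONNECT_PIPE_MAX_DELAY.
# The same backoff pattern in platform-neutral form; attempt() and the
# constants are assumptions for illustration.
import asyncio

INIT_DELAY = 0.001
MAX_DELAY = 0.100

async def retry_busy(attempt):
    delay = INIT_DELAY
    while True:
        try:
            return attempt()
        except BlockingIOError:
            # Busy: wait a little longer before each retry, up to the cap.
            delay = min(delay * 2, MAX_DELAY)
            await asyncio.sleep(delay)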
if obj not in self._registered: self._registered.add(obj) _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0) # XXX We could also use SetFileCompletionNotificationModes() # to avoid sending notifications to completion port of ops # that succeed immediately. def _register(self, ov, obj, callback): self._check_closed() # Return a future which will be set with the result of the # operation when it completes. The future's value is actually # the value returned by callback(). f = _OverlappedFuture(ov, loop=self._loop) if f._source_traceback: del f._source_traceback[-1] if not ov.pending: # The operation has completed, so no need to postpone the # work. We cannot take this short cut if we need the # NumberOfBytes, CompletionKey values returned by # PostQueuedCompletionStatus(). try: value = callback(None, None, ov) except OSError as e: f.set_exception(e) else: f.set_result(value) # Even if GetOverlappedResult() was called, we have to wait for the # notification of the completion in GetQueuedCompletionStatus(). # Register the overlapped operation to keep a reference to the # OVERLAPPED object, otherwise the memory is freed and Windows may # read uninitialized memory. # Register the overlapped operation for later. Note that # we only store obj to prevent it from being garbage # collected too early. self._cache[ov.address] = (f, ov, obj, callback) return f def _unregister(self, ov): """Unregister an overlapped object. Call this method when its future has been cancelled. The event can already be signalled (pending in the proactor event queue). It is also safe if the event is never signalled (because it was cancelled). """ self._check_closed() self._unregistered.append(ov) def _get_accept_socket(self, family): s = socket.socket(family) s.settimeout(0) return s def _poll(self, timeout=None): if timeout is None: ms = INFINITE elif timeout < 0: raise ValueError("negative timeout") else: # GetQueuedCompletionStatus() has a resolution of 1 millisecond, # round away from zero to wait *at least* timeout seconds. ms = math.ceil(timeout * 1e3) if ms >= INFINITE: raise ValueError("timeout too big") while True: status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms) if status is None: break ms = 0 err, transferred, key, address = status try: f, ov, obj, callback = self._cache.pop(address) except KeyError: if self._loop.get_debug(): self._loop.call_exception_handler({ 'message': ('GetQueuedCompletionStatus() returned an ' 'unexpected event'), 'status': ('err=%s transferred=%s key=%#x address=%#x' % (err, transferred, key, address)), }) # key is either zero, or it is used to return a pipe # handle which should be closed to avoid a leak. if key not in (0, _overlapped.INVALID_HANDLE_VALUE): _winapi.CloseHandle(key) continue if obj in self._stopped_serving: f.cancel() # Don't call the callback if _register() already read the result or # if the overlapped has been cancelled elif not f.done(): try: value = callback(transferred, key, ov) except OSError as e: f.set_exception(e) self._results.append(f) else: f.set_result(value) self._results.append(f) # Remove unregistered futures for ov in self._unregistered: self._cache.pop(ov.address, None) self._unregistered.clear() def _stop_serving(self, obj): # obj is a socket or pipe handle. It will be closed in # BaseProactorEventLoop._stop_serving() which will make any # pending operations fail quickly. self._stopped_serving.add(obj) def close(self): if self._iocp is None: # already closed return # Cancel remaining registered operations. 
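# Illustrative sketch (not part of the original source): _poll() above
# converts a float timeout in seconds to whole milliseconds with math.ceil(),
# rounding away from zero so the wait lasts *at least* the requested time:
import math

for timeout in (0.0004, 0.001, 0.0015):
    print(f'{timeout}s -> {math.ceil(timeout * 1e3)}ms')  # 1ms, 1ms, 2ms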
for address, (fut, ov, obj, callback) in list(self._cache.items()): if fut.cancelled(): # Nothing to do with cancelled futures pass elif isinstance(fut, _WaitCancelFuture): # _WaitCancelFuture must not be cancelled pass else: try: fut.cancel() except OSError as exc: if self._loop is not None: context = { 'message': 'Cancelling a future failed', 'exception': exc, 'future': fut, } if fut._source_traceback: context['source_traceback'] = fut._source_traceback self._loop.call_exception_handler(context) # Wait until all cancelled overlapped complete: don't exit with running # overlapped to prevent a crash. Display progress every second if the # loop is still running. msg_update = 1.0 start_time = time.monotonic() next_msg = start_time + msg_update while self._cache: if next_msg <= time.monotonic(): logger.debug('%r is running after closing for %.1f seconds', self, time.monotonic() - start_time) next_msg = time.monotonic() + msg_update # handle a few events, or timeout self._poll(msg_update) self._results = [] _winapi.CloseHandle(self._iocp) self._iocp = None def __del__(self): self.close() class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport): def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): self._proc = windows_utils.Popen( args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, bufsize=bufsize, **kwargs) def callback(f): returncode = self._proc.poll() self._process_exited(returncode) f = self._loop._proactor.wait_for_handle(int(self._proc._handle)) f.add_done_callback(callback) SelectorEventLoop = _WindowsSelectorEventLoop class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): _loop_factory = SelectorEventLoop class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): _loop_factory = ProactorEventLoop DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy streams.py 0000644 00000064040 15030732674 0006607 0 ustar 00 __all__ = ( 'StreamReader', 'StreamWriter', 'StreamReaderProtocol', 'open_connection', 'start_server') import socket import sys import warnings import weakref if hasattr(socket, 'AF_UNIX'): __all__ += ('open_unix_connection', 'start_unix_server') from . import coroutines from . import events from . import exceptions from . import format_helpers from . import protocols from .log import logger from .tasks import sleep _DEFAULT_LIMIT = 2 ** 16 # 64 KiB async def open_connection(host=None, port=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): """A wrapper for create_connection() returning a (reader, writer) pair. The reader returned is a StreamReader instance; the writer is a StreamWriter instance. The arguments are all the usual arguments to create_connection() except protocol_factory; most common are positional host and port, with various optional keyword arguments following. Additional optional keyword arguments are loop (to set the event loop instance to use) and limit (to set the buffer limit passed to the StreamReader). (If you want to customize the StreamReader and/or StreamReaderProtocol classes, just copy the code -- there's really nothing special here except some convenience.) 
""" if loop is None: loop = events.get_event_loop() else: warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) reader = StreamReader(limit=limit, loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) transport, _ = await loop.create_connection( lambda: protocol, host, port, **kwds) writer = StreamWriter(transport, protocol, reader, loop) return reader, writer async def start_server(client_connected_cb, host=None, port=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): """Start a socket server, call back for each client connected. The first parameter, `client_connected_cb`, takes two parameters: client_reader, client_writer. client_reader is a StreamReader object, while client_writer is a StreamWriter object. This parameter can either be a plain callback function or a coroutine; if it is a coroutine, it will be automatically converted into a Task. The rest of the arguments are all the usual arguments to loop.create_server() except protocol_factory; most common are positional host and port, with various optional keyword arguments following. The return value is the same as loop.create_server(). Additional optional keyword arguments are loop (to set the event loop instance to use) and limit (to set the buffer limit passed to the StreamReader). The return value is the same as loop.create_server(), i.e. a Server object which can be used to stop the service. """ if loop is None: loop = events.get_event_loop() else: warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) def factory(): reader = StreamReader(limit=limit, loop=loop) protocol = StreamReaderProtocol(reader, client_connected_cb, loop=loop) return protocol return await loop.create_server(factory, host, port, **kwds) if hasattr(socket, 'AF_UNIX'): # UNIX Domain Sockets are supported on this platform async def open_unix_connection(path=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): """Similar to `open_connection` but works with UNIX Domain Sockets.""" if loop is None: loop = events.get_event_loop() else: warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) reader = StreamReader(limit=limit, loop=loop) protocol = StreamReaderProtocol(reader, loop=loop) transport, _ = await loop.create_unix_connection( lambda: protocol, path, **kwds) writer = StreamWriter(transport, protocol, reader, loop) return reader, writer async def start_unix_server(client_connected_cb, path=None, *, loop=None, limit=_DEFAULT_LIMIT, **kwds): """Similar to `start_server` but works with UNIX Domain Sockets.""" if loop is None: loop = events.get_event_loop() else: warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) def factory(): reader = StreamReader(limit=limit, loop=loop) protocol = StreamReaderProtocol(reader, client_connected_cb, loop=loop) return protocol return await loop.create_unix_server(factory, path, **kwds) class FlowControlMixin(protocols.Protocol): """Reusable flow control logic for StreamWriter.drain(). This implements the protocol methods pause_writing(), resume_writing() and connection_lost(). If the subclass overrides these it must call the super methods. StreamWriter.drain() must wait for _drain_helper() coroutine. 
""" def __init__(self, loop=None): if loop is None: self._loop = events.get_event_loop() else: self._loop = loop self._paused = False self._drain_waiter = None self._connection_lost = False def pause_writing(self): assert not self._paused self._paused = True if self._loop.get_debug(): logger.debug("%r pauses writing", self) def resume_writing(self): assert self._paused self._paused = False if self._loop.get_debug(): logger.debug("%r resumes writing", self) waiter = self._drain_waiter if waiter is not None: self._drain_waiter = None if not waiter.done(): waiter.set_result(None) def connection_lost(self, exc): self._connection_lost = True # Wake up the writer if currently paused. if not self._paused: return waiter = self._drain_waiter if waiter is None: return self._drain_waiter = None if waiter.done(): return if exc is None: waiter.set_result(None) else: waiter.set_exception(exc) async def _drain_helper(self): if self._connection_lost: raise ConnectionResetError('Connection lost') if not self._paused: return waiter = self._drain_waiter assert waiter is None or waiter.cancelled() waiter = self._loop.create_future() self._drain_waiter = waiter await waiter def _get_close_waiter(self, stream): raise NotImplementedError class StreamReaderProtocol(FlowControlMixin, protocols.Protocol): """Helper class to adapt between Protocol and StreamReader. (This is a helper class instead of making StreamReader itself a Protocol subclass, because the StreamReader has other potential uses, and to prevent the user of the StreamReader to accidentally call inappropriate methods of the protocol.) """ _source_traceback = None def __init__(self, stream_reader, client_connected_cb=None, loop=None): super().__init__(loop=loop) if stream_reader is not None: self._stream_reader_wr = weakref.ref(stream_reader) self._source_traceback = stream_reader._source_traceback else: self._stream_reader_wr = None if client_connected_cb is not None: # This is a stream created by the `create_server()` function. # Keep a strong reference to the reader until a connection # is established. 
self._strong_reader = stream_reader self._reject_connection = False self._stream_writer = None self._transport = None self._client_connected_cb = client_connected_cb self._over_ssl = False self._closed = self._loop.create_future() @property def _stream_reader(self): if self._stream_reader_wr is None: return None return self._stream_reader_wr() def connection_made(self, transport): if self._reject_connection: context = { 'message': ('An open stream was garbage collected prior to ' 'establishing network connection; ' 'call "stream.close()" explicitly.') } if self._source_traceback: context['source_traceback'] = self._source_traceback self._loop.call_exception_handler(context) transport.abort() return self._transport = transport reader = self._stream_reader if reader is not None: reader.set_transport(transport) self._over_ssl = transport.get_extra_info('sslcontext') is not None if self._client_connected_cb is not None: self._stream_writer = StreamWriter(transport, self, reader, self._loop) res = self._client_connected_cb(reader, self._stream_writer) if coroutines.iscoroutine(res): self._loop.create_task(res) self._strong_reader = None def connection_lost(self, exc): reader = self._stream_reader if reader is not None: if exc is None: reader.feed_eof() else: reader.set_exception(exc) if not self._closed.done(): if exc is None: self._closed.set_result(None) else: self._closed.set_exception(exc) super().connection_lost(exc) self._stream_reader_wr = None self._stream_writer = None self._transport = None def data_received(self, data): reader = self._stream_reader if reader is not None: reader.feed_data(data) def eof_received(self): reader = self._stream_reader if reader is not None: reader.feed_eof() if self._over_ssl: # Prevent a warning in SSLProtocol.eof_received: # "returning true from eof_received() # has no effect when using ssl" return False return True def _get_close_waiter(self, stream): return self._closed def __del__(self): # Prevent reports about unhandled exceptions. # Better than self._closed._log_traceback = False hack closed = self._closed if closed.done() and not closed.cancelled(): closed.exception() class StreamWriter: """Wraps a Transport. This exposes write(), writelines(), [can_]write_eof(), get_extra_info() and close(). It adds drain() which returns an optional Future on which you can wait for flow control. It also adds a transport property which references the Transport directly. 
""" def __init__(self, transport, protocol, reader, loop): self._transport = transport self._protocol = protocol # drain() expects that the reader has an exception() method assert reader is None or isinstance(reader, StreamReader) self._reader = reader self._loop = loop self._complete_fut = self._loop.create_future() self._complete_fut.set_result(None) def __repr__(self): info = [self.__class__.__name__, f'transport={self._transport!r}'] if self._reader is not None: info.append(f'reader={self._reader!r}') return '<{}>'.format(' '.join(info)) @property def transport(self): return self._transport def write(self, data): self._transport.write(data) def writelines(self, data): self._transport.writelines(data) def write_eof(self): return self._transport.write_eof() def can_write_eof(self): return self._transport.can_write_eof() def close(self): return self._transport.close() def is_closing(self): return self._transport.is_closing() async def wait_closed(self): await self._protocol._get_close_waiter(self) def get_extra_info(self, name, default=None): return self._transport.get_extra_info(name, default) async def drain(self): """Flush the write buffer. The intended use is to write w.write(data) await w.drain() """ if self._reader is not None: exc = self._reader.exception() if exc is not None: raise exc if self._transport.is_closing(): # Wait for protocol.connection_lost() call # Raise connection closing error if any, # ConnectionResetError otherwise # Yield to the event loop so connection_lost() may be # called. Without this, _drain_helper() would return # immediately, and code that calls # write(...); await drain() # in a loop would never call connection_lost(), so it # would not see an error when the socket is closed. await sleep(0) await self._protocol._drain_helper() class StreamReader: _source_traceback = None def __init__(self, limit=_DEFAULT_LIMIT, loop=None): # The line length limit is a security feature; # it also doubles as half the buffer limit. if limit <= 0: raise ValueError('Limit cannot be <= 0') self._limit = limit if loop is None: self._loop = events.get_event_loop() else: self._loop = loop self._buffer = bytearray() self._eof = False # Whether we're done. 
self._waiter = None # A future used by _wait_for_data() self._exception = None self._transport = None self._paused = False if self._loop.get_debug(): self._source_traceback = format_helpers.extract_stack( sys._getframe(1)) def __repr__(self): info = ['StreamReader'] if self._buffer: info.append(f'{len(self._buffer)} bytes') if self._eof: info.append('eof') if self._limit != _DEFAULT_LIMIT: info.append(f'limit={self._limit}') if self._waiter: info.append(f'waiter={self._waiter!r}') if self._exception: info.append(f'exception={self._exception!r}') if self._transport: info.append(f'transport={self._transport!r}') if self._paused: info.append('paused') return '<{}>'.format(' '.join(info)) def exception(self): return self._exception def set_exception(self, exc): self._exception = exc waiter = self._waiter if waiter is not None: self._waiter = None if not waiter.cancelled(): waiter.set_exception(exc) def _wakeup_waiter(self): """Wakeup read*() functions waiting for data or EOF.""" waiter = self._waiter if waiter is not None: self._waiter = None if not waiter.cancelled(): waiter.set_result(None) def set_transport(self, transport): assert self._transport is None, 'Transport already set' self._transport = transport def _maybe_resume_transport(self): if self._paused and len(self._buffer) <= self._limit: self._paused = False self._transport.resume_reading() def feed_eof(self): self._eof = True self._wakeup_waiter() def at_eof(self): """Return True if the buffer is empty and 'feed_eof' was called.""" return self._eof and not self._buffer def feed_data(self, data): assert not self._eof, 'feed_data after feed_eof' if not data: return self._buffer.extend(data) self._wakeup_waiter() if (self._transport is not None and not self._paused and len(self._buffer) > 2 * self._limit): try: self._transport.pause_reading() except NotImplementedError: # The transport can't be paused. # We'll just have to buffer all data. # Forget the transport so we don't keep trying. self._transport = None else: self._paused = True async def _wait_for_data(self, func_name): """Wait until feed_data() or feed_eof() is called. If stream was paused, automatically resume it. """ # StreamReader uses a future to link the protocol feed_data() method # to a read coroutine. Running two read coroutines at the same time # would have an unexpected behaviour. It would not possible to know # which coroutine would get the next data. if self._waiter is not None: raise RuntimeError( f'{func_name}() called while another coroutine is ' f'already waiting for incoming data') assert not self._eof, '_wait_for_data after EOF' # Waiting for data while paused will make deadlock, so prevent it. # This is essential for readexactly(n) for case when n > self._limit. if self._paused: self._paused = False self._transport.resume_reading() self._waiter = self._loop.create_future() try: await self._waiter finally: self._waiter = None async def readline(self): """Read chunk of data from the stream until newline (b'\n') is found. On success, return chunk that ends with newline. If only partial line can be read due to EOF, return incomplete line without terminating newline. When EOF was reached while no bytes read, empty bytes object is returned. If limit is reached, ValueError will be raised. In that case, if newline was found, complete line including newline will be removed from internal buffer. Else, internal buffer will be cleared. Limit is compared against part of the line without newline. If stream was paused, this function will automatically resume it if needed. 
""" sep = b'\n' seplen = len(sep) try: line = await self.readuntil(sep) except exceptions.IncompleteReadError as e: return e.partial except exceptions.LimitOverrunError as e: if self._buffer.startswith(sep, e.consumed): del self._buffer[:e.consumed + seplen] else: self._buffer.clear() self._maybe_resume_transport() raise ValueError(e.args[0]) return line async def readuntil(self, separator=b'\n'): """Read data from the stream until ``separator`` is found. On success, the data and separator will be removed from the internal buffer (consumed). Returned data will include the separator at the end. Configured stream limit is used to check result. Limit sets the maximal length of data that can be returned, not counting the separator. If an EOF occurs and the complete separator is still not found, an IncompleteReadError exception will be raised, and the internal buffer will be reset. The IncompleteReadError.partial attribute may contain the separator partially. If the data cannot be read because of over limit, a LimitOverrunError exception will be raised, and the data will be left in the internal buffer, so it can be read again. """ seplen = len(separator) if seplen == 0: raise ValueError('Separator should be at least one-byte string') if self._exception is not None: raise self._exception # Consume whole buffer except last bytes, which length is # one less than seplen. Let's check corner cases with # separator='SEPARATOR': # * we have received almost complete separator (without last # byte). i.e buffer='some textSEPARATO'. In this case we # can safely consume len(separator) - 1 bytes. # * last byte of buffer is first byte of separator, i.e. # buffer='abcdefghijklmnopqrS'. We may safely consume # everything except that last byte, but this require to # analyze bytes of buffer that match partial separator. # This is slow and/or require FSM. For this case our # implementation is not optimal, since require rescanning # of data that is known to not belong to separator. In # real world, separator will not be so long to notice # performance problems. Even when reading MIME-encoded # messages :) # `offset` is the number of bytes from the beginning of the buffer # where there is no occurrence of `separator`. offset = 0 # Loop until we find `separator` in the buffer, exceed the buffer size, # or an EOF has happened. while True: buflen = len(self._buffer) # Check if we now have enough data in the buffer for `separator` to # fit. if buflen - offset >= seplen: isep = self._buffer.find(separator, offset) if isep != -1: # `separator` is in the buffer. `isep` will be used later # to retrieve the data. break # see upper comment for explanation. offset = buflen + 1 - seplen if offset > self._limit: raise exceptions.LimitOverrunError( 'Separator is not found, and chunk exceed the limit', offset) # Complete message (with full separator) may be present in buffer # even when EOF flag is set. This may happen when the last chunk # adds data which makes separator be found. That's why we check for # EOF *ater* inspecting the buffer. if self._eof: chunk = bytes(self._buffer) self._buffer.clear() raise exceptions.IncompleteReadError(chunk, None) # _wait_for_data() will resume reading if stream was paused. 
await self._wait_for_data('readuntil') if isep > self._limit: raise exceptions.LimitOverrunError( 'Separator is found, but chunk is longer than limit', isep) chunk = self._buffer[:isep + seplen] del self._buffer[:isep + seplen] self._maybe_resume_transport() return bytes(chunk) async def read(self, n=-1): """Read up to `n` bytes from the stream. If n is not provided, or set to -1, read until EOF and return all read bytes. If the EOF was received and the internal buffer is empty, return an empty bytes object. If n is zero, return empty bytes object immediately. If n is positive, this function try to read `n` bytes, and may return less or equal bytes than requested, but at least one byte. If EOF was received before any byte is read, this function returns empty byte object. Returned value is not limited with limit, configured at stream creation. If stream was paused, this function will automatically resume it if needed. """ if self._exception is not None: raise self._exception if n == 0: return b'' if n < 0: # This used to just loop creating a new waiter hoping to # collect everything in self._buffer, but that would # deadlock if the subprocess sends more than self.limit # bytes. So just call self.read(self._limit) until EOF. blocks = [] while True: block = await self.read(self._limit) if not block: break blocks.append(block) return b''.join(blocks) if not self._buffer and not self._eof: await self._wait_for_data('read') # This will work right even if buffer is less than n bytes data = bytes(self._buffer[:n]) del self._buffer[:n] self._maybe_resume_transport() return data async def readexactly(self, n): """Read exactly `n` bytes. Raise an IncompleteReadError if EOF is reached before `n` bytes can be read. The IncompleteReadError.partial attribute of the exception will contain the partial read bytes. if n is zero, return empty bytes object. Returned value is not limited with limit, configured at stream creation. If stream was paused, this function will automatically resume it if needed. """ if n < 0: raise ValueError('readexactly size can not be less than zero') if self._exception is not None: raise self._exception if n == 0: return b'' while len(self._buffer) < n: if self._eof: incomplete = bytes(self._buffer) self._buffer.clear() raise exceptions.IncompleteReadError(incomplete, n) await self._wait_for_data('readexactly') if len(self._buffer) == n: data = bytes(self._buffer) self._buffer.clear() else: data = bytes(self._buffer[:n]) del self._buffer[:n] self._maybe_resume_transport() return data def __aiter__(self): return self async def __anext__(self): val = await self.readline() if val == b'': raise StopAsyncIteration return val locks.py 0000644 00000041574 15030732674 0006253 0 ustar 00 """Synchronization primitives.""" __all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore') import collections import types import warnings from . import events from . import futures from . import exceptions from .import coroutines class _ContextManager: """Context manager. This enables the following idiom for acquiring and releasing a lock around a block: with (yield from lock): <block> while failing loudly when accidentally using: with lock: <block> Deprecated, use 'async with' statement: async with lock: <block> """ def __init__(self, lock): self._lock = lock def __enter__(self): # We have no use for the "as ..." clause in the with # statement for locks. return None def __exit__(self, *args): try: self._lock.release() finally: self._lock = None # Crudely prevent reuse. 
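# Illustrative sketch (not part of the original source): _ContextManager above
# only serves the deprecated 'with (yield from lock)' form. The supported
# pattern is the asynchronous context manager provided by _ContextManagerMixin
# below:
import asyncio

async def main():
    lock = asyncio.Lock()
    async with lock:            # acquire() on entry, release() on exit
        await asyncio.sleep(0)

asyncio.run(main())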
class _ContextManagerMixin: def __enter__(self): raise RuntimeError( '"yield from" should be used as context manager expression') def __exit__(self, *args): # This must exist because __enter__ exists, even though that # always raises; that's how the with-statement works. pass @types.coroutine def __iter__(self): # This is not a coroutine. It is meant to enable the idiom: # # with (yield from lock): # <block> # # as an alternative to: # # yield from lock.acquire() # try: # <block> # finally: # lock.release() # Deprecated, use 'async with' statement: # async with lock: # <block> warnings.warn("'with (yield from lock)' is deprecated " "use 'async with lock' instead", DeprecationWarning, stacklevel=2) yield from self.acquire() return _ContextManager(self) # The flag is needed for legacy asyncio.iscoroutine() __iter__._is_coroutine = coroutines._is_coroutine async def __acquire_ctx(self): await self.acquire() return _ContextManager(self) def __await__(self): warnings.warn("'with await lock' is deprecated " "use 'async with lock' instead", DeprecationWarning, stacklevel=2) # To make "with await lock" work. return self.__acquire_ctx().__await__() async def __aenter__(self): await self.acquire() # We have no use for the "as ..." clause in the with # statement for locks. return None async def __aexit__(self, exc_type, exc, tb): self.release() class Lock(_ContextManagerMixin): """Primitive lock objects. A primitive lock is a synchronization primitive that is not owned by a particular coroutine when locked. A primitive lock is in one of two states, 'locked' or 'unlocked'. It is created in the unlocked state. It has two basic methods, acquire() and release(). When the state is unlocked, acquire() changes the state to locked and returns immediately. When the state is locked, acquire() blocks until a call to release() in another coroutine changes it to unlocked, then the acquire() call resets it to locked and returns. The release() method should only be called in the locked state; it changes the state to unlocked and returns immediately. If an attempt is made to release an unlocked lock, a RuntimeError will be raised. When more than one coroutine is blocked in acquire() waiting for the state to turn to unlocked, only one coroutine proceeds when a release() call resets the state to unlocked; first coroutine which is blocked in acquire() is being processed. acquire() is a coroutine and should be called with 'await'. Locks also support the asynchronous context management protocol. 'async with lock' statement should be used. Usage: lock = Lock() ... await lock.acquire() try: ... finally: lock.release() Context manager usage: lock = Lock() ... async with lock: ... Lock objects can be tested for locking state: if not lock.locked(): await lock.acquire() else: # lock is acquired ... """ def __init__(self, *, loop=None): self._waiters = None self._locked = False if loop is None: self._loop = events.get_event_loop() else: self._loop = loop warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) def __repr__(self): res = super().__repr__() extra = 'locked' if self._locked else 'unlocked' if self._waiters: extra = f'{extra}, waiters:{len(self._waiters)}' return f'<{res[1:-1]} [{extra}]>' def locked(self): """Return True if lock is acquired.""" return self._locked async def acquire(self): """Acquire a lock. This method blocks until the lock is unlocked, then sets it to locked and returns True. 
""" if (not self._locked and (self._waiters is None or all(w.cancelled() for w in self._waiters))): self._locked = True return True if self._waiters is None: self._waiters = collections.deque() fut = self._loop.create_future() self._waiters.append(fut) # Finally block should be called before the CancelledError # handling as we don't want CancelledError to call # _wake_up_first() and attempt to wake up itself. try: try: await fut finally: self._waiters.remove(fut) except exceptions.CancelledError: if not self._locked: self._wake_up_first() raise self._locked = True return True def release(self): """Release a lock. When the lock is locked, reset it to unlocked, and return. If any other coroutines are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. When invoked on an unlocked lock, a RuntimeError is raised. There is no return value. """ if self._locked: self._locked = False self._wake_up_first() else: raise RuntimeError('Lock is not acquired.') def _wake_up_first(self): """Wake up the first waiter if it isn't done.""" if not self._waiters: return try: fut = next(iter(self._waiters)) except StopIteration: return # .done() necessarily means that a waiter will wake up later on and # either take the lock, or, if it was cancelled and lock wasn't # taken already, will hit this again and wake up a new waiter. if not fut.done(): fut.set_result(True) class Event: """Asynchronous equivalent to threading.Event. Class implementing event objects. An event manages a flag that can be set to true with the set() method and reset to false with the clear() method. The wait() method blocks until the flag is true. The flag is initially false. """ def __init__(self, *, loop=None): self._waiters = collections.deque() self._value = False if loop is None: self._loop = events.get_event_loop() else: self._loop = loop warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) def __repr__(self): res = super().__repr__() extra = 'set' if self._value else 'unset' if self._waiters: extra = f'{extra}, waiters:{len(self._waiters)}' return f'<{res[1:-1]} [{extra}]>' def is_set(self): """Return True if and only if the internal flag is true.""" return self._value def set(self): """Set the internal flag to true. All coroutines waiting for it to become true are awakened. Coroutine that call wait() once the flag is true will not block at all. """ if not self._value: self._value = True for fut in self._waiters: if not fut.done(): fut.set_result(True) def clear(self): """Reset the internal flag to false. Subsequently, coroutines calling wait() will block until set() is called to set the internal flag to true again.""" self._value = False async def wait(self): """Block until the internal flag is true. If the internal flag is true on entry, return True immediately. Otherwise, block until another coroutine calls set() to set the flag to true, then return True. """ if self._value: return True fut = self._loop.create_future() self._waiters.append(fut) try: await fut return True finally: self._waiters.remove(fut) class Condition(_ContextManagerMixin): """Asynchronous equivalent to threading.Condition. This class implements condition variable objects. A condition variable allows one or more coroutines to wait until they are notified by another coroutine. A new Lock object is created and used as the underlying lock. 
""" def __init__(self, lock=None, *, loop=None): if loop is None: self._loop = events.get_event_loop() else: self._loop = loop warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) if lock is None: lock = Lock(loop=loop) elif lock._loop is not self._loop: raise ValueError("loop argument must agree with lock") self._lock = lock # Export the lock's locked(), acquire() and release() methods. self.locked = lock.locked self.acquire = lock.acquire self.release = lock.release self._waiters = collections.deque() def __repr__(self): res = super().__repr__() extra = 'locked' if self.locked() else 'unlocked' if self._waiters: extra = f'{extra}, waiters:{len(self._waiters)}' return f'<{res[1:-1]} [{extra}]>' async def wait(self): """Wait until notified. If the calling coroutine has not acquired the lock when this method is called, a RuntimeError is raised. This method releases the underlying lock, and then blocks until it is awakened by a notify() or notify_all() call for the same condition variable in another coroutine. Once awakened, it re-acquires the lock and returns True. """ if not self.locked(): raise RuntimeError('cannot wait on un-acquired lock') self.release() try: fut = self._loop.create_future() self._waiters.append(fut) try: await fut return True finally: self._waiters.remove(fut) finally: # Must reacquire lock even if wait is cancelled cancelled = False while True: try: await self.acquire() break except exceptions.CancelledError: cancelled = True if cancelled: raise exceptions.CancelledError async def wait_for(self, predicate): """Wait until a predicate becomes true. The predicate should be a callable which result will be interpreted as a boolean value. The final predicate value is the return value. """ result = predicate() while not result: await self.wait() result = predicate() return result def notify(self, n=1): """By default, wake up one coroutine waiting on this condition, if any. If the calling coroutine has not acquired the lock when this method is called, a RuntimeError is raised. This method wakes up at most n of the coroutines waiting for the condition variable; it is a no-op if no coroutines are waiting. Note: an awakened coroutine does not actually return from its wait() call until it can reacquire the lock. Since notify() does not release the lock, its caller should. """ if not self.locked(): raise RuntimeError('cannot notify on un-acquired lock') idx = 0 for fut in self._waiters: if idx >= n: break if not fut.done(): idx += 1 fut.set_result(False) def notify_all(self): """Wake up all threads waiting on this condition. This method acts like notify(), but wakes up all waiting threads instead of one. If the calling thread has not acquired the lock when this method is called, a RuntimeError is raised. """ self.notify(len(self._waiters)) class Semaphore(_ContextManagerMixin): """A Semaphore implementation. A semaphore manages an internal counter which is decremented by each acquire() call and incremented by each release() call. The counter can never go below zero; when acquire() finds that it is zero, it blocks, waiting until some other thread calls release(). Semaphores also support the context management protocol. The optional argument gives the initial value for the internal counter; it defaults to 1. If the value given is less than 0, ValueError is raised. 
""" def __init__(self, value=1, *, loop=None): if value < 0: raise ValueError("Semaphore initial value must be >= 0") self._value = value self._waiters = collections.deque() if loop is None: self._loop = events.get_event_loop() else: self._loop = loop warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) def __repr__(self): res = super().__repr__() extra = 'locked' if self.locked() else f'unlocked, value:{self._value}' if self._waiters: extra = f'{extra}, waiters:{len(self._waiters)}' return f'<{res[1:-1]} [{extra}]>' def _wake_up_next(self): while self._waiters: waiter = self._waiters.popleft() if not waiter.done(): waiter.set_result(None) return def locked(self): """Returns True if semaphore can not be acquired immediately.""" return self._value == 0 async def acquire(self): """Acquire a semaphore. If the internal counter is larger than zero on entry, decrement it by one and return True immediately. If it is zero on entry, block, waiting until some other coroutine has called release() to make it larger than 0, and then return True. """ while self._value <= 0: fut = self._loop.create_future() self._waiters.append(fut) try: await fut except: # See the similar code in Queue.get. fut.cancel() if self._value > 0 and not fut.cancelled(): self._wake_up_next() raise self._value -= 1 return True def release(self): """Release a semaphore, incrementing the internal counter by one. When it was zero on entry and another coroutine is waiting for it to become larger than zero again, wake up that coroutine. """ self._value += 1 self._wake_up_next() class BoundedSemaphore(Semaphore): """A bounded semaphore implementation. This raises ValueError in release() if it would increase the value above the initial value. """ def __init__(self, value=1, *, loop=None): if loop: warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) self._bound_value = value super().__init__(value, loop=loop) def release(self): if self._value >= self._bound_value: raise ValueError('BoundedSemaphore released too many times') super().release() queues.py 0000644 00000020037 15030732674 0006436 0 ustar 00 __all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty') import collections import heapq import warnings from . import events from . import locks class QueueEmpty(Exception): """Raised when Queue.get_nowait() is called on an empty Queue.""" pass class QueueFull(Exception): """Raised when the Queue.put_nowait() method is called on a full Queue.""" pass class Queue: """A queue, useful for coordinating producer and consumer coroutines. If maxsize is less than or equal to zero, the queue size is infinite. If it is an integer greater than 0, then "await put()" will block when the queue reaches maxsize, until an item is removed by get(). Unlike the standard library Queue, you can reliably know this Queue's size with qsize(), since your single-threaded asyncio application won't be interrupted between calling qsize() and doing an operation on the Queue. """ def __init__(self, maxsize=0, *, loop=None): if loop is None: self._loop = events.get_event_loop() else: self._loop = loop warnings.warn("The loop argument is deprecated since Python 3.8, " "and scheduled for removal in Python 3.10.", DeprecationWarning, stacklevel=2) self._maxsize = maxsize # Futures. self._getters = collections.deque() # Futures. 
self._putters = collections.deque() self._unfinished_tasks = 0 self._finished = locks.Event(loop=loop) self._finished.set() self._init(maxsize) # These three are overridable in subclasses. def _init(self, maxsize): self._queue = collections.deque() def _get(self): return self._queue.popleft() def _put(self, item): self._queue.append(item) # End of the overridable methods. def _wakeup_next(self, waiters): # Wake up the next waiter (if any) that isn't cancelled. while waiters: waiter = waiters.popleft() if not waiter.done(): waiter.set_result(None) break def __repr__(self): return f'<{type(self).__name__} at {id(self):#x} {self._format()}>' def __str__(self): return f'<{type(self).__name__} {self._format()}>' def _format(self): result = f'maxsize={self._maxsize!r}' if getattr(self, '_queue', None): result += f' _queue={list(self._queue)!r}' if self._getters: result += f' _getters[{len(self._getters)}]' if self._putters: result += f' _putters[{len(self._putters)}]' if self._unfinished_tasks: result += f' tasks={self._unfinished_tasks}' return result def qsize(self): """Number of items in the queue.""" return len(self._queue) @property def maxsize(self): """Number of items allowed in the queue.""" return self._maxsize def empty(self): """Return True if the queue is empty, False otherwise.""" return not self._queue def full(self): """Return True if there are maxsize items in the queue. Note: if the Queue was initialized with maxsize=0 (the default), then full() is never True. """ if self._maxsize <= 0: return False else: return self.qsize() >= self._maxsize async def put(self, item): """Put an item into the queue. If the queue is full, wait until a free slot is available before adding the item. """ while self.full(): putter = self._loop.create_future() self._putters.append(putter) try: await putter except: putter.cancel() # Just in case putter is not done yet. try: # Clean self._putters from cancelled putters. self._putters.remove(putter) except ValueError: # The putter could be removed from self._putters by a # previous get_nowait call. pass if not self.full() and not putter.cancelled(): # We were woken up by get_nowait(), but can't take # the call. Wake up the next in line. self._wakeup_next(self._putters) raise return self.put_nowait(item) def put_nowait(self, item): """Put an item into the queue without blocking. If no free slot is immediately available, raise QueueFull. """ if self.full(): raise QueueFull self._put(item) self._unfinished_tasks += 1 self._finished.clear() self._wakeup_next(self._getters) async def get(self): """Remove and return an item from the queue. If the queue is empty, wait until an item is available. """ while self.empty(): getter = self._loop.create_future() self._getters.append(getter) try: await getter except: getter.cancel() # Just in case getter is not done yet. try: # Clean self._getters from cancelled getters. self._getters.remove(getter) except ValueError: # The getter could be removed from self._getters by a # previous put_nowait call. pass if not self.empty() and not getter.cancelled(): # We were woken up by put_nowait(), but can't take # the call. Wake up the next in line. self._wakeup_next(self._getters) raise return self.get_nowait() def get_nowait(self): """Remove and return an item from the queue. Return an item if one is immediately available, else raise QueueEmpty.
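# --- Illustrative usage sketch (not part of this module) ---
# A minimal producer/consumer example for the Queue above: get() blocks
# while the queue is empty, and put() blocks once maxsize is reached.
import asyncio

async def demo_queue():
    queue = asyncio.Queue(maxsize=2)

    async def produce():
        for i in range(4):
            await queue.put(i)        # blocks while 2 items sit unconsumed

    async def consume():
        for _ in range(4):
            item = await queue.get()  # blocks while the queue is empty
            print('consumed', item)

    await asyncio.gather(produce(), consume())

# asyncio.run(demo_queue())
# --- end usage sketch ---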
""" if self.empty(): raise QueueEmpty item = self._get() self._wakeup_next(self._putters) return item def task_done(self): """Indicate that a formerly enqueued task is complete. Used by queue consumers. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises ValueError if called more times than there were items placed in the queue. """ if self._unfinished_tasks <= 0: raise ValueError('task_done() called too many times') self._unfinished_tasks -= 1 if self._unfinished_tasks == 0: self._finished.set() async def join(self): """Block until all items in the queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer calls task_done() to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks. """ if self._unfinished_tasks > 0: await self._finished.wait() class PriorityQueue(Queue): """A subclass of Queue; retrieves entries in priority order (lowest first). Entries are typically tuples of the form: (priority number, data). """ def _init(self, maxsize): self._queue = [] def _put(self, item, heappush=heapq.heappush): heappush(self._queue, item) def _get(self, heappop=heapq.heappop): return heappop(self._queue) class LifoQueue(Queue): """A subclass of Queue that retrieves most recently added entries first.""" def _init(self, maxsize): self._queue = [] def _put(self, item): self._queue.append(item) def _get(self): return self._queue.pop() selector_events.py 0000644 00000114124 15030732674 0010334 0 ustar 00 """Event loop using a selector and related classes. A selector is a "notify-when-ready" multiplexer. For a subclass which also includes support for signal handling, see the unix_events sub-module. """ __all__ = 'BaseSelectorEventLoop', import collections import errno import functools import selectors import socket import warnings import weakref try: import ssl except ImportError: # pragma: no cover ssl = None from . import base_events from . import constants from . import events from . import futures from . import protocols from . import sslproto from . import transports from . import trsock from .log import logger def _test_selector_event(selector, fd, event): # Test if the selector is monitoring 'event' events # for the file descriptor 'fd'. try: key = selector.get_key(fd) except KeyError: return False else: return bool(key.events & event) def _check_ssl_socket(sock): if ssl is not None and isinstance(sock, ssl.SSLSocket): raise TypeError("Socket cannot be of type SSLSocket") class BaseSelectorEventLoop(base_events.BaseEventLoop): """Selector event loop. See events.EventLoop for API specification. 
""" def __init__(self, selector=None): super().__init__() if selector is None: selector = selectors.DefaultSelector() logger.debug('Using selector: %s', selector.__class__.__name__) self._selector = selector self._make_self_pipe() self._transports = weakref.WeakValueDictionary() def _make_socket_transport(self, sock, protocol, waiter=None, *, extra=None, server=None): return _SelectorSocketTransport(self, sock, protocol, waiter, extra, server) def _make_ssl_transport( self, rawsock, protocol, sslcontext, waiter=None, *, server_side=False, server_hostname=None, extra=None, server=None, ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): ssl_protocol = sslproto.SSLProtocol( self, protocol, sslcontext, waiter, server_side, server_hostname, ssl_handshake_timeout=ssl_handshake_timeout) _SelectorSocketTransport(self, rawsock, ssl_protocol, extra=extra, server=server) return ssl_protocol._app_transport def _make_datagram_transport(self, sock, protocol, address=None, waiter=None, extra=None): return _SelectorDatagramTransport(self, sock, protocol, address, waiter, extra) def close(self): if self.is_running(): raise RuntimeError("Cannot close a running event loop") if self.is_closed(): return self._close_self_pipe() super().close() if self._selector is not None: self._selector.close() self._selector = None def _close_self_pipe(self): self._remove_reader(self._ssock.fileno()) self._ssock.close() self._ssock = None self._csock.close() self._csock = None self._internal_fds -= 1 def _make_self_pipe(self): # A self-socket, really. :-) self._ssock, self._csock = socket.socketpair() self._ssock.setblocking(False) self._csock.setblocking(False) self._internal_fds += 1 self._add_reader(self._ssock.fileno(), self._read_from_self) def _process_self_data(self, data): pass def _read_from_self(self): while True: try: data = self._ssock.recv(4096) if not data: break self._process_self_data(data) except InterruptedError: continue except BlockingIOError: break def _write_to_self(self): # This may be called from a different thread, possibly after # _close_self_pipe() has been called or even while it is # running. Guard for self._csock being None or closed. When # a socket is closed, send() raises OSError (with errno set to # EBADF, but let's not rely on the exact error code). csock = self._csock if csock is None: return try: csock.send(b'\0') except OSError: if self._debug: logger.debug("Fail to write a null byte into the " "self-pipe socket", exc_info=True) def _start_serving(self, protocol_factory, sock, sslcontext=None, server=None, backlog=100, ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): self._add_reader(sock.fileno(), self._accept_connection, protocol_factory, sock, sslcontext, server, backlog, ssl_handshake_timeout) def _accept_connection( self, protocol_factory, sock, sslcontext=None, server=None, backlog=100, ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): # This method is only called once for each event loop tick where the # listening socket has triggered an EVENT_READ. There may be multiple # connections waiting for an .accept() so it is called in a loop. # See https://bugs.python.org/issue27906 for more details. for _ in range(backlog): try: conn, addr = sock.accept() if self._debug: logger.debug("%r got a new connection from %r: %r", server, addr, conn) conn.setblocking(False) except (BlockingIOError, InterruptedError, ConnectionAbortedError): # Early exit because the socket accept buffer is empty. 
return None except OSError as exc: # There's nowhere to send the error, so just log it. if exc.errno in (errno.EMFILE, errno.ENFILE, errno.ENOBUFS, errno.ENOMEM): # Some platforms (e.g. Linux) keep reporting the FD as # ready, so we remove the read handler temporarily. # We'll try again in a while. self.call_exception_handler({ 'message': 'socket.accept() out of system resource', 'exception': exc, 'socket': trsock.TransportSocket(sock), }) self._remove_reader(sock.fileno()) self.call_later(constants.ACCEPT_RETRY_DELAY, self._start_serving, protocol_factory, sock, sslcontext, server, backlog, ssl_handshake_timeout) else: raise # The event loop will catch, log and ignore it. else: extra = {'peername': addr} accept = self._accept_connection2( protocol_factory, conn, extra, sslcontext, server, ssl_handshake_timeout) self.create_task(accept) async def _accept_connection2( self, protocol_factory, conn, extra, sslcontext=None, server=None, ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): protocol = None transport = None try: protocol = protocol_factory() waiter = self.create_future() if sslcontext: transport = self._make_ssl_transport( conn, protocol, sslcontext, waiter=waiter, server_side=True, extra=extra, server=server, ssl_handshake_timeout=ssl_handshake_timeout) else: transport = self._make_socket_transport( conn, protocol, waiter=waiter, extra=extra, server=server) try: await waiter except BaseException: transport.close() raise # It's now up to the protocol to handle the connection. except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: if self._debug: context = { 'message': 'Error on transport creation for incoming connection', 'exception': exc, } if protocol is not None: context['protocol'] = protocol if transport is not None: context['transport'] = transport self.call_exception_handler(context) def _ensure_fd_no_transport(self, fd): fileno = fd if not isinstance(fileno, int): try: fileno = int(fileno.fileno()) except (AttributeError, TypeError, ValueError): # This code matches selectors._fileobj_to_fd function.
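# --- Illustrative usage sketch (not part of this module) ---
# The accept path above (_start_serving / _accept_connection /
# _accept_connection2) is driven by the public server API; a minimal echo
# server using asyncio.start_server (host/port are placeholders):
import asyncio

async def handle(reader, writer):
    data = await reader.read(100)
    writer.write(data)                # echo back
    await writer.drain()
    writer.close()

async def demo_server():
    server = await asyncio.start_server(handle, '127.0.0.1', 8888)
    async with server:                # serve until cancelled
        await server.serve_forever()

# asyncio.run(demo_server())
# --- end usage sketch ---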
raise ValueError(f"Invalid file object: {fd!r}") from None try: transport = self._transports[fileno] except KeyError: pass else: if not transport.is_closing(): raise RuntimeError( f'File descriptor {fd!r} is used by transport ' f'{transport!r}') def _add_reader(self, fd, callback, *args): self._check_closed() handle = events.Handle(callback, args, self, None) try: key = self._selector.get_key(fd) except KeyError: self._selector.register(fd, selectors.EVENT_READ, (handle, None)) else: mask, (reader, writer) = key.events, key.data self._selector.modify(fd, mask | selectors.EVENT_READ, (handle, writer)) if reader is not None: reader.cancel() def _remove_reader(self, fd): if self.is_closed(): return False try: key = self._selector.get_key(fd) except KeyError: return False else: mask, (reader, writer) = key.events, key.data mask &= ~selectors.EVENT_READ if not mask: self._selector.unregister(fd) else: self._selector.modify(fd, mask, (None, writer)) if reader is not None: reader.cancel() return True else: return False def _add_writer(self, fd, callback, *args): self._check_closed() handle = events.Handle(callback, args, self, None) try: key = self._selector.get_key(fd) except KeyError: self._selector.register(fd, selectors.EVENT_WRITE, (None, handle)) else: mask, (reader, writer) = key.events, key.data self._selector.modify(fd, mask | selectors.EVENT_WRITE, (reader, handle)) if writer is not None: writer.cancel() def _remove_writer(self, fd): """Remove a writer callback.""" if self.is_closed(): return False try: key = self._selector.get_key(fd) except KeyError: return False else: mask, (reader, writer) = key.events, key.data # Remove both writer and connector. mask &= ~selectors.EVENT_WRITE if not mask: self._selector.unregister(fd) else: self._selector.modify(fd, mask, (reader, None)) if writer is not None: writer.cancel() return True else: return False def add_reader(self, fd, callback, *args): """Add a reader callback.""" self._ensure_fd_no_transport(fd) return self._add_reader(fd, callback, *args) def remove_reader(self, fd): """Remove a reader callback.""" self._ensure_fd_no_transport(fd) return self._remove_reader(fd) def add_writer(self, fd, callback, *args): """Add a writer callback..""" self._ensure_fd_no_transport(fd) return self._add_writer(fd, callback, *args) def remove_writer(self, fd): """Remove a writer callback.""" self._ensure_fd_no_transport(fd) return self._remove_writer(fd) async def sock_recv(self, sock, n): """Receive data from the socket. The return value is a bytes object representing the data received. The maximum amount of data to be received at once is specified by nbytes. """ _check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") try: return sock.recv(n) except (BlockingIOError, InterruptedError): pass fut = self.create_future() fd = sock.fileno() self.add_reader(fd, self._sock_recv, fut, sock, n) fut.add_done_callback( functools.partial(self._sock_read_done, fd)) return await fut def _sock_read_done(self, fd, fut): self.remove_reader(fd) def _sock_recv(self, fut, sock, n): # _sock_recv() can add itself as an I/O callback if the operation can't # be done immediately. Don't use it directly, call sock_recv(). 
if fut.done(): return try: data = sock.recv(n) except (BlockingIOError, InterruptedError): return # try again next time except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: fut.set_exception(exc) else: fut.set_result(data) async def sock_recv_into(self, sock, buf): """Receive data from the socket. The received data is written into *buf* (a writable buffer). The return value is the number of bytes written. """ _check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") try: return sock.recv_into(buf) except (BlockingIOError, InterruptedError): pass fut = self.create_future() fd = sock.fileno() self.add_reader(fd, self._sock_recv_into, fut, sock, buf) fut.add_done_callback( functools.partial(self._sock_read_done, fd)) return await fut def _sock_recv_into(self, fut, sock, buf): # _sock_recv_into() can add itself as an I/O callback if the operation # can't be done immediately. Don't use it directly, call # sock_recv_into(). if fut.done(): return try: nbytes = sock.recv_into(buf) except (BlockingIOError, InterruptedError): return # try again next time except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: fut.set_exception(exc) else: fut.set_result(nbytes) async def sock_sendall(self, sock, data): """Send data to the socket. The socket must be connected to a remote socket. This method continues to send data until either all of it has been sent or an error occurs. None is returned on success. On error, an exception is raised, and there is no way to determine how much data, if any, was successfully processed by the receiving end of the connection. """ _check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") try: n = sock.send(data) except (BlockingIOError, InterruptedError): n = 0 if n == len(data): # all data sent return fut = self.create_future() fd = sock.fileno() fut.add_done_callback( functools.partial(self._sock_write_done, fd)) # use a trick with a list in a closure to store mutable state self.add_writer(fd, self._sock_sendall, fut, sock, memoryview(data), [n]) return await fut def _sock_sendall(self, fut, sock, view, pos): if fut.done(): # Future cancellation can be scheduled on previous loop iteration return start = pos[0] try: n = sock.send(view[start:]) except (BlockingIOError, InterruptedError): return except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: fut.set_exception(exc) return start += n if start == len(view): fut.set_result(None) else: pos[0] = start async def sock_connect(self, sock, address): """Connect to a remote socket at address. This method is a coroutine. """ _check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX: resolved = await self._ensure_resolved( address, family=sock.family, proto=sock.proto, loop=self) _, _, _, _, address = resolved[0] fut = self.create_future() self._sock_connect(fut, sock, address) return await fut def _sock_connect(self, fut, sock, address): fd = sock.fileno() try: sock.connect(address) except (BlockingIOError, InterruptedError): # Issue #23618: When the C function connect() fails with EINTR, the # connection runs in the background. We have to wait until the socket # becomes writable to be notified when the connection succeeds or # fails.
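# --- Illustrative usage sketch (not part of this module) ---
# A minimal client example for the sock_* coroutines above, which operate
# directly on a non-blocking socket; the address is a placeholder.
import asyncio
import socket

async def demo_sock_api():
    loop = asyncio.get_running_loop()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setblocking(False)           # required by the sock_* methods
    await loop.sock_connect(sock, ('127.0.0.1', 8888))
    await loop.sock_sendall(sock, b'hello')
    data = await loop.sock_recv(sock, 1024)
    print(data)
    sock.close()

# asyncio.run(demo_sock_api())
# --- end usage sketch ---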
fut.add_done_callback( functools.partial(self._sock_write_done, fd)) self.add_writer(fd, self._sock_connect_cb, fut, sock, address) except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: fut.set_exception(exc) else: fut.set_result(None) def _sock_write_done(self, fd, fut): self.remove_writer(fd) def _sock_connect_cb(self, fut, sock, address): if fut.done(): return try: err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) if err != 0: # Jump to any except clause below. raise OSError(err, f'Connect call failed {address}') except (BlockingIOError, InterruptedError): # socket is still registered, the callback will be retried later pass except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: fut.set_exception(exc) else: fut.set_result(None) async def sock_accept(self, sock): """Accept a connection. The socket must be bound to an address and listening for connections. The return value is a pair (conn, address) where conn is a new socket object usable to send and receive data on the connection, and address is the address bound to the socket on the other end of the connection. """ _check_ssl_socket(sock) if self._debug and sock.gettimeout() != 0: raise ValueError("the socket must be non-blocking") fut = self.create_future() self._sock_accept(fut, False, sock) return await fut def _sock_accept(self, fut, registered, sock): fd = sock.fileno() if registered: self.remove_reader(fd) if fut.done(): return try: conn, address = sock.accept() conn.setblocking(False) except (BlockingIOError, InterruptedError): self.add_reader(fd, self._sock_accept, fut, True, sock) except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: fut.set_exception(exc) else: fut.set_result((conn, address)) async def _sendfile_native(self, transp, file, offset, count): del self._transports[transp._sock_fd] resume_reading = transp.is_reading() transp.pause_reading() await transp._make_empty_waiter() try: return await self.sock_sendfile(transp._sock, file, offset, count, fallback=False) finally: transp._reset_empty_waiter() if resume_reading: transp.resume_reading() self._transports[transp._sock_fd] = transp def _process_events(self, event_list): for key, mask in event_list: fileobj, (reader, writer) = key.fileobj, key.data if mask & selectors.EVENT_READ and reader is not None: if reader._cancelled: self._remove_reader(fileobj) else: self._add_callback(reader) if mask & selectors.EVENT_WRITE and writer is not None: if writer._cancelled: self._remove_writer(fileobj) else: self._add_callback(writer) def _stop_serving(self, sock): self._remove_reader(sock.fileno()) sock.close() class _SelectorTransport(transports._FlowControlMixin, transports.Transport): max_size = 256 * 1024 # Buffer size passed to recv(). _buffer_factory = bytearray # Constructs initial value for self._buffer. 
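# --- Illustrative usage sketch (not part of this module) ---
# A minimal example of sock_accept() above: a bare accept loop over a
# non-blocking listening socket, without the transport/protocol layer.
# The address is a placeholder.
import asyncio
import socket

async def demo_sock_accept():
    loop = asyncio.get_running_loop()
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('127.0.0.1', 8888))
    srv.listen()
    srv.setblocking(False)
    conn, addr = await loop.sock_accept(srv)   # waits for one connection
    print('connection from', addr)
    conn.close()
    srv.close()

# asyncio.run(demo_sock_accept())
# --- end usage sketch ---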
# Attribute used in the destructor: it must be set even if the constructor # is not called (see _SelectorSslTransport which may start by raising an # exception) _sock = None def __init__(self, loop, sock, protocol, extra=None, server=None): super().__init__(extra, loop) self._extra['socket'] = trsock.TransportSocket(sock) try: self._extra['sockname'] = sock.getsockname() except OSError: self._extra['sockname'] = None if 'peername' not in self._extra: try: self._extra['peername'] = sock.getpeername() except socket.error: self._extra['peername'] = None self._sock = sock self._sock_fd = sock.fileno() self._protocol_connected = False self.set_protocol(protocol) self._server = server self._buffer = self._buffer_factory() self._conn_lost = 0 # Set when call to connection_lost scheduled. self._closing = False # Set when close() called. if self._server is not None: self._server._attach() loop._transports[self._sock_fd] = self def __repr__(self): info = [self.__class__.__name__] if self._sock is None: info.append('closed') elif self._closing: info.append('closing') info.append(f'fd={self._sock_fd}') # test if the transport was closed if self._loop is not None and not self._loop.is_closed(): polling = _test_selector_event(self._loop._selector, self._sock_fd, selectors.EVENT_READ) if polling: info.append('read=polling') else: info.append('read=idle') polling = _test_selector_event(self._loop._selector, self._sock_fd, selectors.EVENT_WRITE) if polling: state = 'polling' else: state = 'idle' bufsize = self.get_write_buffer_size() info.append(f'write=<{state}, bufsize={bufsize}>') return '<{}>'.format(' '.join(info)) def abort(self): self._force_close(None) def set_protocol(self, protocol): self._protocol = protocol self._protocol_connected = True def get_protocol(self): return self._protocol def is_closing(self): return self._closing def close(self): if self._closing: return self._closing = True self._loop._remove_reader(self._sock_fd) if not self._buffer: self._conn_lost += 1 self._loop._remove_writer(self._sock_fd) self._loop.call_soon(self._call_connection_lost, None) def __del__(self, _warn=warnings.warn): if self._sock is not None: _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) self._sock.close() def _fatal_error(self, exc, message='Fatal error on transport'): # Should be called from exception handler only. 
if isinstance(exc, OSError): if self._loop.get_debug(): logger.debug("%r: %s", self, message, exc_info=True) else: self._loop.call_exception_handler({ 'message': message, 'exception': exc, 'transport': self, 'protocol': self._protocol, }) self._force_close(exc) def _force_close(self, exc): if self._conn_lost: return if self._buffer: self._buffer.clear() self._loop._remove_writer(self._sock_fd) if not self._closing: self._closing = True self._loop._remove_reader(self._sock_fd) self._conn_lost += 1 self._loop.call_soon(self._call_connection_lost, exc) def _call_connection_lost(self, exc): try: if self._protocol_connected: self._protocol.connection_lost(exc) finally: self._sock.close() self._sock = None self._protocol = None self._loop = None server = self._server if server is not None: server._detach() self._server = None def get_write_buffer_size(self): return len(self._buffer) def _add_reader(self, fd, callback, *args): if self._closing: return self._loop._add_reader(fd, callback, *args) class _SelectorSocketTransport(_SelectorTransport): _start_tls_compatible = True _sendfile_compatible = constants._SendfileMode.TRY_NATIVE def __init__(self, loop, sock, protocol, waiter=None, extra=None, server=None): self._read_ready_cb = None super().__init__(loop, sock, protocol, extra, server) self._eof = False self._paused = False self._empty_waiter = None # Disable the Nagle algorithm -- small writes will be # sent without waiting for the TCP ACK. This generally # decreases the latency (in some cases significantly.) base_events._set_nodelay(self._sock) self._loop.call_soon(self._protocol.connection_made, self) # only start reading when connection_made() has been called self._loop.call_soon(self._add_reader, self._sock_fd, self._read_ready) if waiter is not None: # only wake up the waiter when connection_made() has been called self._loop.call_soon(futures._set_result_unless_cancelled, waiter, None) def set_protocol(self, protocol): if isinstance(protocol, protocols.BufferedProtocol): self._read_ready_cb = self._read_ready__get_buffer else: self._read_ready_cb = self._read_ready__data_received super().set_protocol(protocol) def is_reading(self): return not self._paused and not self._closing def pause_reading(self): if self._closing or self._paused: return self._paused = True self._loop._remove_reader(self._sock_fd) if self._loop.get_debug(): logger.debug("%r pauses reading", self) def resume_reading(self): if self._closing or not self._paused: return self._paused = False self._add_reader(self._sock_fd, self._read_ready) if self._loop.get_debug(): logger.debug("%r resumes reading", self) def _read_ready(self): self._read_ready_cb() def _read_ready__get_buffer(self): if self._conn_lost: return try: buf = self._protocol.get_buffer(-1) if not len(buf): raise RuntimeError('get_buffer() returned an empty buffer') except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error( exc, 'Fatal error: protocol.get_buffer() call failed.') return try: nbytes = self._sock.recv_into(buf) except (BlockingIOError, InterruptedError): return except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error(exc, 'Fatal read error on socket transport') return if not nbytes: self._read_ready__on_eof() return try: self._protocol.buffer_updated(nbytes) except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error( exc, 'Fatal error: protocol.buffer_updated() call failed.') def _read_ready__data_received(self): if self._conn_lost: return 
try: data = self._sock.recv(self.max_size) except (BlockingIOError, InterruptedError): return except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error(exc, 'Fatal read error on socket transport') return if not data: self._read_ready__on_eof() return try: self._protocol.data_received(data) except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error( exc, 'Fatal error: protocol.data_received() call failed.') def _read_ready__on_eof(self): if self._loop.get_debug(): logger.debug("%r received EOF", self) try: keep_open = self._protocol.eof_received() except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error( exc, 'Fatal error: protocol.eof_received() call failed.') return if keep_open: # We're keeping the connection open so the # protocol can write more, but we still can't # receive more, so remove the reader callback. self._loop._remove_reader(self._sock_fd) else: self.close() def write(self, data): if not isinstance(data, (bytes, bytearray, memoryview)): raise TypeError(f'data argument must be a bytes-like object, ' f'not {type(data).__name__!r}') if self._eof: raise RuntimeError('Cannot call write() after write_eof()') if self._empty_waiter is not None: raise RuntimeError('unable to write; sendfile is in progress') if not data: return if self._conn_lost: if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: logger.warning('socket.send() raised exception.') self._conn_lost += 1 return if not self._buffer: # Optimization: try to send now. try: n = self._sock.send(data) except (BlockingIOError, InterruptedError): pass except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error(exc, 'Fatal write error on socket transport') return else: data = data[n:] if not data: return # Not all was written; register write handler. self._loop._add_writer(self._sock_fd, self._write_ready) # Add it to the buffer. self._buffer.extend(data) self._maybe_pause_protocol() def _write_ready(self): assert self._buffer, 'Data should not be empty' if self._conn_lost: return try: n = self._sock.send(self._buffer) except (BlockingIOError, InterruptedError): pass except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._loop._remove_writer(self._sock_fd) self._buffer.clear() self._fatal_error(exc, 'Fatal write error on socket transport') if self._empty_waiter is not None: self._empty_waiter.set_exception(exc) else: if n: del self._buffer[:n] self._maybe_resume_protocol() # May append to buffer. 
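# --- Illustrative usage sketch (not part of this module) ---
# The transport read/write machinery above ultimately drives user protocol
# callbacks; a minimal client protocol example (host/port are placeholders):
import asyncio

class EchoClient(asyncio.Protocol):
    def __init__(self, on_lost):
        self.on_lost = on_lost

    def connection_made(self, transport):
        transport.write(b'hello')     # buffered/flushed by write()/_write_ready()

    def data_received(self, data):    # invoked from _read_ready__data_received()
        print('received', data)

    def connection_lost(self, exc):
        self.on_lost.set_result(True)

async def demo_protocol():
    loop = asyncio.get_running_loop()
    on_lost = loop.create_future()
    transport, proto = await loop.create_connection(
        lambda: EchoClient(on_lost), '127.0.0.1', 8888)
    await on_lost
    transport.close()

# asyncio.run(demo_protocol())
# --- end usage sketch ---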
if not self._buffer: self._loop._remove_writer(self._sock_fd) if self._empty_waiter is not None: self._empty_waiter.set_result(None) if self._closing: self._call_connection_lost(None) elif self._eof: self._sock.shutdown(socket.SHUT_WR) def write_eof(self): if self._closing or self._eof: return self._eof = True if not self._buffer: self._sock.shutdown(socket.SHUT_WR) def can_write_eof(self): return True def _call_connection_lost(self, exc): super()._call_connection_lost(exc) if self._empty_waiter is not None: self._empty_waiter.set_exception( ConnectionError("Connection is closed by peer")) def _make_empty_waiter(self): if self._empty_waiter is not None: raise RuntimeError("Empty waiter is already set") self._empty_waiter = self._loop.create_future() if not self._buffer: self._empty_waiter.set_result(None) return self._empty_waiter def _reset_empty_waiter(self): self._empty_waiter = None class _SelectorDatagramTransport(_SelectorTransport): _buffer_factory = collections.deque def __init__(self, loop, sock, protocol, address=None, waiter=None, extra=None): super().__init__(loop, sock, protocol, extra) self._address = address self._loop.call_soon(self._protocol.connection_made, self) # only start reading when connection_made() has been called self._loop.call_soon(self._add_reader, self._sock_fd, self._read_ready) if waiter is not None: # only wake up the waiter when connection_made() has been called self._loop.call_soon(futures._set_result_unless_cancelled, waiter, None) def get_write_buffer_size(self): return sum(len(data) for data, _ in self._buffer) def _read_ready(self): if self._conn_lost: return try: data, addr = self._sock.recvfrom(self.max_size) except (BlockingIOError, InterruptedError): pass except OSError as exc: self._protocol.error_received(exc) except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error(exc, 'Fatal read error on datagram transport') else: self._protocol.datagram_received(data, addr) def sendto(self, data, addr=None): if not isinstance(data, (bytes, bytearray, memoryview)): raise TypeError(f'data argument must be a bytes-like object, ' f'not {type(data).__name__!r}') if not data: return if self._address: if addr not in (None, self._address): raise ValueError( f'Invalid address: must be None or {self._address}') addr = self._address if self._conn_lost and self._address: if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: logger.warning('socket.send() raised exception.') self._conn_lost += 1 return if not self._buffer: # Attempt to send it right away first. try: if self._extra['peername']: self._sock.send(data) else: self._sock.sendto(data, addr) return except (BlockingIOError, InterruptedError): self._loop._add_writer(self._sock_fd, self._sendto_ready) except OSError as exc: self._protocol.error_received(exc) return except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error( exc, 'Fatal write error on datagram transport') return # Ensure that what we buffer is immutable. self._buffer.append((bytes(data), addr)) self._maybe_pause_protocol() def _sendto_ready(self): while self._buffer: data, addr = self._buffer.popleft() try: if self._extra['peername']: self._sock.send(data) else: self._sock.sendto(data, addr) except (BlockingIOError, InterruptedError): self._buffer.appendleft((data, addr)) # Try again later. 
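# --- Illustrative usage sketch (not part of this module) ---
# A minimal example for the datagram transport above, via the public
# create_datagram_endpoint API (the remote address is a placeholder):
import asyncio

class UdpPing(asyncio.DatagramProtocol):
    def connection_made(self, transport):
        transport.sendto(b'ping')     # goes through sendto() above

    def datagram_received(self, data, addr):
        print('reply', data, 'from', addr)

    def error_received(self, exc):    # e.g. ICMP port unreachable
        print('error', exc)

async def demo_udp():
    loop = asyncio.get_running_loop()
    transport, proto = await loop.create_datagram_endpoint(
        UdpPing, remote_addr=('127.0.0.1', 9999))
    await asyncio.sleep(0.5)
    transport.close()

# asyncio.run(demo_udp())
# --- end usage sketch ---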
break except OSError as exc: self._protocol.error_received(exc) return except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: self._fatal_error( exc, 'Fatal write error on datagram transport') return self._maybe_resume_protocol() # May append to buffer. if not self._buffer: self._loop._remove_writer(self._sock_fd) if self._closing: self._call_connection_lost(None) runners.py 0000644 00000004006 15030732674 0006621 0 ustar 00 __all__ = 'run', from . import coroutines from . import events from . import tasks def run(main, *, debug=None): """Execute the coroutine and return the result. This function runs the passed coroutine, taking care of managing the asyncio event loop and finalizing asynchronous generators. This function cannot be called when another asyncio event loop is running in the same thread. If debug is True, the event loop will be run in debug mode. This function always creates a new event loop and closes it at the end. It should be used as a main entry point for asyncio programs, and should ideally only be called once. Example: async def main(): await asyncio.sleep(1) print('hello') asyncio.run(main()) """ if events._get_running_loop() is not None: raise RuntimeError( "asyncio.run() cannot be called from a running event loop") if not coroutines.iscoroutine(main): raise ValueError("a coroutine was expected, got {!r}".format(main)) loop = events.new_event_loop() try: events.set_event_loop(loop) if debug is not None: loop.set_debug(debug) return loop.run_until_complete(main) finally: try: _cancel_all_tasks(loop) loop.run_until_complete(loop.shutdown_asyncgens()) finally: events.set_event_loop(None) loop.close() def _cancel_all_tasks(loop): to_cancel = tasks.all_tasks(loop) if not to_cancel: return for task in to_cancel: task.cancel() loop.run_until_complete( tasks.gather(*to_cancel, loop=loop, return_exceptions=True)) for task in to_cancel: if task.cancelled(): continue if task.exception() is not None: loop.call_exception_handler({ 'message': 'unhandled exception during asyncio.run() shutdown', 'exception': task.exception(), 'task': task, })