moving to scripts
asq-env/lib/python3.9/site-packages/trio/__init__.py (new file, 123 lines)
@@ -0,0 +1,123 @@
"""Trio - A friendly Python library for async concurrency and I/O
"""

# General layout:
#
# trio/_core/... is the self-contained core library. It does various
# shenanigans to export a consistent "core API", but parts of the core API are
# too low-level to be recommended for regular use.
#
# trio/*.py define a set of more usable tools on top of this. They import from
# trio._core and from each other.
#
# This file pulls together the friendly public API, by re-exporting the more
# innocuous bits of the _core API + the higher-level tools from trio/*.py.

from ._version import __version__

from ._core import (
    TrioInternalError,
    RunFinishedError,
    WouldBlock,
    Cancelled,
    BusyResourceError,
    ClosedResourceError,
    MultiError,
    run,
    open_nursery,
    CancelScope,
    current_effective_deadline,
    TASK_STATUS_IGNORED,
    current_time,
    BrokenResourceError,
    EndOfChannel,
    Nursery,
)

from ._timeouts import (
    move_on_at,
    move_on_after,
    sleep_forever,
    sleep_until,
    sleep,
    fail_at,
    fail_after,
    TooSlowError,
)

from ._sync import (
    Event,
    CapacityLimiter,
    Semaphore,
    Lock,
    StrictFIFOLock,
    Condition,
)

from ._highlevel_generic import aclose_forcefully, StapledStream

from ._channel import (
    open_memory_channel,
    MemorySendChannel,
    MemoryReceiveChannel,
)

from ._signals import open_signal_receiver

from ._highlevel_socket import SocketStream, SocketListener

from ._file_io import open_file, wrap_file

from ._path import Path

from ._subprocess import Process, open_process, run_process

from ._ssl import SSLStream, SSLListener, NeedHandshakeError

from ._highlevel_serve_listeners import serve_listeners

from ._highlevel_open_tcp_stream import open_tcp_stream

from ._highlevel_open_tcp_listeners import open_tcp_listeners, serve_tcp

from ._highlevel_open_unix_stream import open_unix_socket

from ._highlevel_ssl_helpers import (
    open_ssl_over_tcp_stream,
    open_ssl_over_tcp_listeners,
    serve_ssl_over_tcp,
)

from ._deprecate import TrioDeprecationWarning

# Submodules imported by default
from . import lowlevel
from . import socket
from . import abc
from . import from_thread
from . import to_thread

# Not imported by default, but mentioned here so static analysis tools like
# pylint will know that it exists.
if False:
    from . import testing

from . import _deprecate

_deprecate.enable_attribute_deprecations(__name__)

# Having the public path in .__module__ attributes is important for:
# - exception names in printed tracebacks
# - sphinx :show-inheritance:
# - deprecation warnings
# - pickle
# - probably other stuff
from ._util import fixup_module_metadata

fixup_module_metadata(__name__, globals())
fixup_module_metadata(lowlevel.__name__, lowlevel.__dict__)
fixup_module_metadata(socket.__name__, socket.__dict__)
fixup_module_metadata(abc.__name__, abc.__dict__)
fixup_module_metadata(from_thread.__name__, from_thread.__dict__)
fixup_module_metadata(to_thread.__name__, to_thread.__dict__)
del fixup_module_metadata
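
A minimal usage sketch of the public API re-exported above (not part of the
commit; the function names are made up for illustration)::

    import trio

    async def child(name, delay):
        await trio.sleep(delay)
        print("hello from", name)

    async def main():
        # open_nursery, sleep, and run all come from the re-exports above
        async with trio.open_nursery() as nursery:
            nursery.start_soon(child, "a", 1)
            nursery.start_soon(child, "b", 2)

    trio.run(main)
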
(30 binary files not shown)
asq-env/lib/python3.9/site-packages/trio/_abc.py (new file, 652 lines)
@@ -0,0 +1,652 @@
# coding: utf-8

from abc import ABCMeta, abstractmethod
from typing import Generic, TypeVar
import trio


# We use ABCMeta instead of ABC, plus set __slots__=(), so as not to force a
# __dict__ onto subclasses.
class Clock(metaclass=ABCMeta):
    """The interface for custom run loop clocks."""

    __slots__ = ()

    @abstractmethod
    def start_clock(self):
        """Do any setup this clock might need.

        Called at the beginning of the run.

        """

    @abstractmethod
    def current_time(self):
        """Return the current time, according to this clock.

        This is used to implement functions like :func:`trio.current_time` and
        :func:`trio.move_on_after`.

        Returns:
            float: The current time.

        """

    @abstractmethod
    def deadline_to_sleep_time(self, deadline):
        """Compute the real time until the given deadline.

        This is called before we enter a system-specific wait function like
        :func:`select.select`, to get the timeout to pass.

        For a clock using wall-time, this should be something like::

           return deadline - self.current_time()

        but of course it may be different if you're implementing some kind of
        virtual clock.

        Args:
            deadline (float): The absolute time of the next deadline,
                according to this clock.

        Returns:
            float: The number of real seconds to sleep until the given
            deadline. May be :data:`math.inf`.

        """

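# Illustrative sketch (not part of the vendored file): a minimal Clock
# implementation that simply reads time.monotonic(). The class name is made
# up; it could be passed to the run loop as trio.run(main, clock=MonotonicClock()).
import time


class MonotonicClock(Clock):
    __slots__ = ()

    def start_clock(self):
        pass  # no setup needed for a wall-clock-style clock

    def current_time(self):
        return time.monotonic()

    def deadline_to_sleep_time(self, deadline):
        # Real seconds until the deadline; may be negative if it is overdue.
        return deadline - self.current_time()

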
class Instrument(metaclass=ABCMeta):
    """The interface for run loop instrumentation.

    Instruments don't have to inherit from this abstract base class, and all
    of these methods are optional. This class serves mostly as documentation.

    """

    __slots__ = ()

    def before_run(self):
        """Called at the beginning of :func:`trio.run`."""

    def after_run(self):
        """Called just before :func:`trio.run` returns."""

    def task_spawned(self, task):
        """Called when the given task is created.

        Args:
            task (trio.lowlevel.Task): The new task.

        """

    def task_scheduled(self, task):
        """Called when the given task becomes runnable.

        It may still be some time before it actually runs, if there are other
        runnable tasks ahead of it.

        Args:
            task (trio.lowlevel.Task): The task that became runnable.

        """

    def before_task_step(self, task):
        """Called immediately before we resume running the given task.

        Args:
            task (trio.lowlevel.Task): The task that is about to run.

        """

    def after_task_step(self, task):
        """Called when we return to the main run loop after a task has yielded.

        Args:
            task (trio.lowlevel.Task): The task that just ran.

        """

    def task_exited(self, task):
        """Called when the given task exits.

        Args:
            task (trio.lowlevel.Task): The finished task.

        """

    def before_io_wait(self, timeout):
        """Called before blocking to wait for I/O readiness.

        Args:
            timeout (float): The number of seconds we are willing to wait.

        """

    def after_io_wait(self, timeout):
        """Called after handling pending I/O.

        Args:
            timeout (float): The number of seconds we were willing to
                wait. This much time may or may not have elapsed, depending on
                whether any I/O was ready.

        """

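# Illustrative sketch (not part of the vendored file): an Instrument that
# logs task creation and exit. It could be attached with
# trio.run(..., instruments=[TaskLogger()]) or trio.lowlevel.add_instrument().
class TaskLogger(Instrument):
    __slots__ = ()

    def task_spawned(self, task):
        print(f"spawned: {task.name}")

    def task_exited(self, task):
        print(f"exited:  {task.name}")

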
class HostnameResolver(metaclass=ABCMeta):
    """If you have a custom hostname resolver, then implementing
    :class:`HostnameResolver` allows you to register this to be used by Trio.

    See :func:`trio.socket.set_custom_hostname_resolver`.

    """

    __slots__ = ()

    @abstractmethod
    async def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0):
        """A custom implementation of :func:`~trio.socket.getaddrinfo`.

        Called by :func:`trio.socket.getaddrinfo`.

        If ``host`` is given as a numeric IP address, then
        :func:`~trio.socket.getaddrinfo` may handle the request itself rather
        than calling this method.

        Any required IDNA encoding is handled before calling this function;
        your implementation can assume that it will never see U-labels like
        ``"café.com"``, and only needs to handle A-labels like
        ``b"xn--caf-dma.com"``.

        """

    @abstractmethod
    async def getnameinfo(self, sockaddr, flags):
        """A custom implementation of :func:`~trio.socket.getnameinfo`.

        Called by :func:`trio.socket.getnameinfo`.

        """


class SocketFactory(metaclass=ABCMeta):
    """If you write a custom class implementing the Trio socket interface,
    then you can use a :class:`SocketFactory` to get Trio to use it.

    See :func:`trio.socket.set_custom_socket_factory`.

    """

    @abstractmethod
    def socket(self, family=None, type=None, proto=None):
        """Create and return a socket object.

        Your socket object must inherit from :class:`trio.socket.SocketType`,
        which is an empty class whose only purpose is to "mark" which classes
        should be considered valid Trio sockets.

        Called by :func:`trio.socket.socket`.

        Note that unlike :func:`trio.socket.socket`, this does not take a
        ``fileno=`` argument. If a ``fileno=`` is specified, then
        :func:`trio.socket.socket` returns a regular Trio socket object
        instead of calling this method.

        """

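# Illustrative sketch (not part of the vendored file): a resolver that pins
# one hostname to a fixed address, registered with
# trio.socket.set_custom_hostname_resolver(). Names and address are made up.
import socket as stdlib_socket


class PinnedResolver(HostnameResolver):
    __slots__ = ()

    async def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0):
        if host == b"xn--caf-dma.com":  # resolvers see A-labels, not U-labels
            return [
                (stdlib_socket.AF_INET, stdlib_socket.SOCK_STREAM, 0, "", ("192.0.2.10", port))
            ]
        raise OSError(f"refusing to resolve {host!r}")

    async def getnameinfo(self, sockaddr, flags):
        raise NotImplementedError

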
class AsyncResource(metaclass=ABCMeta):
    """A standard interface for resources that need to be cleaned up, and
    where that cleanup may require blocking operations.

    This class distinguishes between "graceful" closes, which may perform I/O
    and thus block, and a "forceful" close, which cannot. For example, cleanly
    shutting down a TLS-encrypted connection requires sending a "goodbye"
    message; but if a peer has become non-responsive, then sending this
    message might block forever, so we may want to just drop the connection
    instead. Therefore the :meth:`aclose` method is unusual in that it
    should always close the connection (or at least make its best attempt)
    *even if it fails*; failure indicates a failure to achieve grace, not a
    failure to close the connection.

    Objects that implement this interface can be used as async context
    managers, i.e., you can write::

        async with create_resource() as some_async_resource:
            ...

    Entering the context manager is synchronous (not a checkpoint); exiting it
    calls :meth:`aclose`. The default implementations of
    ``__aenter__`` and ``__aexit__`` should be adequate for all subclasses.

    """

    __slots__ = ()

    @abstractmethod
    async def aclose(self):
        """Close this resource, possibly blocking.

        IMPORTANT: This method may block in order to perform a "graceful"
        shutdown. But, if this fails, then it still *must* close any
        underlying resources before returning. An error from this method
        indicates a failure to achieve grace, *not* a failure to close the
        connection.

        For example, suppose we call :meth:`aclose` on a TLS-encrypted
        connection. This requires sending a "goodbye" message; but if the peer
        has become non-responsive, then our attempt to send this message might
        block forever, and eventually time out and be cancelled. In this case
        the :meth:`aclose` method on :class:`~trio.SSLStream` will
        immediately close the underlying transport stream using
        :func:`trio.aclose_forcefully` before raising :exc:`~trio.Cancelled`.

        If the resource is already closed, then this method should silently
        succeed.

        Once this method completes, any other pending or future operations on
        this resource should generally raise :exc:`~trio.ClosedResourceError`,
        unless there's a good reason to do otherwise.

        See also: :func:`trio.aclose_forcefully`.

        """

    async def __aenter__(self):
        return self

    async def __aexit__(self, *args):
        await self.aclose()

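# Illustrative sketch (not part of the vendored file): a trivial AsyncResource
# that owns a send stream and tries a graceful goodbye message on close, but
# always closes the underlying stream even if the goodbye fails.
class GoodbyeResource(AsyncResource):
    def __init__(self, stream):
        self._stream = stream

    async def aclose(self):
        try:
            await self._stream.send_all(b"goodbye\n")
        finally:
            # Failure above means "failed to achieve grace", not "failed to close".
            await trio.aclose_forcefully(self._stream)

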
class SendStream(AsyncResource):
    """A standard interface for sending data on a byte stream.

    The underlying stream may be unidirectional, or bidirectional. If it's
    bidirectional, then you probably want to also implement
    :class:`ReceiveStream`, which makes your object a :class:`Stream`.

    :class:`SendStream` objects also implement the :class:`AsyncResource`
    interface, so they can be closed by calling :meth:`~AsyncResource.aclose`
    or using an ``async with`` block.

    If you want to send Python objects rather than raw bytes, see
    :class:`SendChannel`.

    """

    __slots__ = ()

    @abstractmethod
    async def send_all(self, data):
        """Sends the given data through the stream, blocking if necessary.

        Args:
            data (bytes, bytearray, or memoryview): The data to send.

        Raises:
            trio.BusyResourceError: if another task is already executing a
                :meth:`send_all`, :meth:`wait_send_all_might_not_block`, or
                :meth:`HalfCloseableStream.send_eof` on this stream.
            trio.BrokenResourceError: if something has gone wrong, and the stream
                is broken.
            trio.ClosedResourceError: if you previously closed this stream
                object, or if another task closes this stream object while
                :meth:`send_all` is running.

        Most low-level operations in Trio provide a guarantee: if they raise
        :exc:`trio.Cancelled`, this means that they had no effect, so the
        system remains in a known state. This is **not true** for
        :meth:`send_all`. If this operation raises :exc:`trio.Cancelled` (or
        any other exception for that matter), then it may have sent some, all,
        or none of the requested data, and there is no way to know which.

        """

    @abstractmethod
    async def wait_send_all_might_not_block(self):
        """Block until it's possible that :meth:`send_all` might not block.

        This method may return early: it's possible that after it returns,
        :meth:`send_all` will still block. (In the worst case, if no better
        implementation is available, then it might always return immediately
        without blocking. It's nice to do better than that when possible,
        though.)

        This method **must not** return *late*: if it's possible for
        :meth:`send_all` to complete without blocking, then it must
        return. When implementing it, err on the side of returning early.

        Raises:
            trio.BusyResourceError: if another task is already executing a
                :meth:`send_all`, :meth:`wait_send_all_might_not_block`, or
                :meth:`HalfCloseableStream.send_eof` on this stream.
            trio.BrokenResourceError: if something has gone wrong, and the stream
                is broken.
            trio.ClosedResourceError: if you previously closed this stream
                object, or if another task closes this stream object while
                :meth:`wait_send_all_might_not_block` is running.

        Note:

          This method is intended to aid in implementing protocols that want
          to delay choosing which data to send until the last moment. E.g.,
          suppose you're working on an implementation of a remote display server
          like `VNC
          <https://en.wikipedia.org/wiki/Virtual_Network_Computing>`__, and
          the network connection is currently backed up so that if you call
          :meth:`send_all` now then it will sit for 0.5 seconds before actually
          sending anything. In this case it doesn't make sense to take a
          screenshot, then wait 0.5 seconds, and then send it, because the
          screen will keep changing while you wait; it's better to wait 0.5
          seconds, then take the screenshot, and then send it, because this
          way the data you deliver will be more
          up-to-date. Using :meth:`wait_send_all_might_not_block` makes it
          possible to implement the better strategy.

          If you use this method, you might also want to read up on
          ``TCP_NOTSENT_LOWAT``.

          Further reading:

          * `Prioritization Only Works When There's Pending Data to Prioritize
            <https://insouciant.org/tech/prioritization-only-works-when-theres-pending-data-to-prioritize/>`__

          * WWDC 2015: Your App and Next Generation Networks: `slides
            <http://devstreaming.apple.com/videos/wwdc/2015/719ui2k57m/719/719_your_app_and_next_generation_networks.pdf?dl=1>`__,
            `video and transcript
            <https://developer.apple.com/videos/play/wwdc2015/719/>`__

        """

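# Illustrative sketch (not part of the vendored file): the "send the freshest
# data" pattern that wait_send_all_might_not_block() enables. take_screenshot
# is a made-up placeholder callable.
async def send_latest_frames(stream: SendStream, take_screenshot):
    while True:
        # Wait until the transport can probably accept more data *before*
        # capturing the frame, so the frame we send is as fresh as possible.
        await stream.wait_send_all_might_not_block()
        frame = take_screenshot()
        await stream.send_all(frame)

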
class ReceiveStream(AsyncResource):
    """A standard interface for receiving data on a byte stream.

    The underlying stream may be unidirectional, or bidirectional. If it's
    bidirectional, then you probably want to also implement
    :class:`SendStream`, which makes your object a :class:`Stream`.

    :class:`ReceiveStream` objects also implement the :class:`AsyncResource`
    interface, so they can be closed by calling :meth:`~AsyncResource.aclose`
    or using an ``async with`` block.

    If you want to receive Python objects rather than raw bytes, see
    :class:`ReceiveChannel`.

    `ReceiveStream` objects can be used in ``async for`` loops. Each iteration
    will produce an arbitrary sized chunk of bytes, like calling
    `receive_some` with no arguments. Every chunk will contain at least one
    byte, and the loop automatically exits when reaching end-of-file.

    """

    __slots__ = ()

    @abstractmethod
    async def receive_some(self, max_bytes=None):
        """Wait until there is data available on this stream, and then return
        some of it.

        A return value of ``b""`` (an empty bytestring) indicates that the
        stream has reached end-of-file. Implementations should be careful that
        they return ``b""`` if, and only if, the stream has reached
        end-of-file!

        Args:
            max_bytes (int): The maximum number of bytes to return. Must be
                greater than zero. Optional; if omitted, then the stream object
                is free to pick a reasonable default.

        Returns:
            bytes or bytearray: The data received.

        Raises:
            trio.BusyResourceError: if two tasks attempt to call
                :meth:`receive_some` on the same stream at the same time.
            trio.BrokenResourceError: if something has gone wrong, and the stream
                is broken.
            trio.ClosedResourceError: if you previously closed this stream
                object, or if another task closes this stream object while
                :meth:`receive_some` is running.

        """

    def __aiter__(self):
        return self

    async def __anext__(self):
        data = await self.receive_some()
        if not data:
            raise StopAsyncIteration
        return data

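# Illustrative sketch (not part of the vendored file): consuming a
# ReceiveStream with "async for", which yields arbitrary-sized chunks until
# end-of-file.
async def count_bytes(stream: ReceiveStream) -> int:
    total = 0
    async for chunk in stream:
        total += len(chunk)
    return total  # loop exited: the peer closed its send side

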
class Stream(SendStream, ReceiveStream):
    """A standard interface for interacting with bidirectional byte streams.

    A :class:`Stream` is an object that implements both the
    :class:`SendStream` and :class:`ReceiveStream` interfaces.

    If implementing this interface, you should consider whether you can go one
    step further and implement :class:`HalfCloseableStream`.

    """

    __slots__ = ()


class HalfCloseableStream(Stream):
    """This interface extends :class:`Stream` to also allow closing the send
    part of the stream without closing the receive part.

    """

    __slots__ = ()

    @abstractmethod
    async def send_eof(self):
        """Send an end-of-file indication on this stream, if possible.

        The difference between :meth:`send_eof` and
        :meth:`~AsyncResource.aclose` is that :meth:`send_eof` is a
        *unidirectional* end-of-file indication. After you call this method,
        you shouldn't try sending any more data on this stream, and your
        remote peer should receive an end-of-file indication (eventually,
        after receiving all the data you sent before that). But, they may
        continue to send data to you, and you can continue to receive it by
        calling :meth:`~ReceiveStream.receive_some`. You can think of it as
        calling :meth:`~AsyncResource.aclose` on just the
        :class:`SendStream` "half" of the stream object (and in fact that's
        literally how :class:`trio.StapledStream` implements it).

        Examples:

        * On a socket, this corresponds to ``shutdown(..., SHUT_WR)`` (`man
          page <https://linux.die.net/man/2/shutdown>`__).

        * The SSH protocol provides the ability to multiplex bidirectional
          "channels" on top of a single encrypted connection. A Trio
          implementation of SSH could expose these channels as
          :class:`HalfCloseableStream` objects, and calling :meth:`send_eof`
          would send an ``SSH_MSG_CHANNEL_EOF`` request (see `RFC 4254 §5.3
          <https://tools.ietf.org/html/rfc4254#section-5.3>`__).

        * On an SSL/TLS-encrypted connection, the protocol doesn't provide any
          way to do a unidirectional shutdown without closing the connection
          entirely, so :class:`~trio.SSLStream` implements
          :class:`Stream`, not :class:`HalfCloseableStream`.

        If an EOF has already been sent, then this method should silently
        succeed.

        Raises:
            trio.BusyResourceError: if another task is already executing a
                :meth:`~SendStream.send_all`,
                :meth:`~SendStream.wait_send_all_might_not_block`, or
                :meth:`send_eof` on this stream.
            trio.BrokenResourceError: if something has gone wrong, and the stream
                is broken.
            trio.ClosedResourceError: if you previously closed this stream
                object, or if another task closes this stream object while
                :meth:`send_eof` is running.

        """

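# Illustrative sketch (not part of the vendored file): a simple
# request/response exchange where the client half-closes to signal "request
# complete", then keeps reading the reply until end-of-file.
async def ask(stream: HalfCloseableStream, request: bytes) -> bytes:
    await stream.send_all(request)
    await stream.send_eof()      # the peer sees EOF on its receive side
    reply = bytearray()
    async for chunk in stream:   # we can still receive after send_eof()
        reply.extend(chunk)
    return bytes(reply)

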
# A regular invariant generic type
T = TypeVar("T")

# The type of object produced by a ReceiveChannel (covariant because
# ReceiveChannel[Derived] can be passed to someone expecting
# ReceiveChannel[Base])
ReceiveType = TypeVar("ReceiveType", covariant=True)

# The type of object accepted by a SendChannel (contravariant because
# SendChannel[Base] can be passed to someone expecting
# SendChannel[Derived])
SendType = TypeVar("SendType", contravariant=True)

# The type of object produced by a Listener (covariant plus must be
# an AsyncResource)
T_resource = TypeVar("T_resource", bound=AsyncResource, covariant=True)


class Listener(AsyncResource, Generic[T_resource]):
    """A standard interface for listening for incoming connections.

    :class:`Listener` objects also implement the :class:`AsyncResource`
    interface, so they can be closed by calling :meth:`~AsyncResource.aclose`
    or using an ``async with`` block.

    """

    __slots__ = ()

    @abstractmethod
    async def accept(self):
        """Wait until an incoming connection arrives, and then return it.

        Returns:
            AsyncResource: An object representing the incoming connection. In
                practice this is generally some kind of :class:`Stream`,
                but in principle you could also define a :class:`Listener` that
                returned, say, channel objects.

        Raises:
            trio.BusyResourceError: if two tasks attempt to call
                :meth:`accept` on the same listener at the same time.
            trio.ClosedResourceError: if you previously closed this listener
                object, or if another task closes this listener object while
                :meth:`accept` is running.

        Listeners don't generally raise :exc:`~trio.BrokenResourceError`,
        because for listeners there is no general condition of "the
        network/remote peer broke the connection" that can be handled in a
        generic way, like there is for streams. Other errors *can* occur and
        be raised from :meth:`accept` – for example, if you run out of file
        descriptors then you might get an :class:`OSError` with its errno set
        to ``EMFILE``.

        """

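# Illustrative sketch (not part of the vendored file): a bare-bones accept
# loop. In real code trio.serve_listeners() does this, plus error handling,
# for you.
async def accept_loop(listener: Listener, handler, nursery):
    async with listener:
        while True:
            conn = await listener.accept()
            nursery.start_soon(handler, conn)

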
class SendChannel(AsyncResource, Generic[SendType]):
    """A standard interface for sending Python objects to some receiver.

    `SendChannel` objects also implement the `AsyncResource` interface, so
    they can be closed by calling `~AsyncResource.aclose` or using an ``async
    with`` block.

    If you want to send raw bytes rather than Python objects, see
    `SendStream`.

    """

    __slots__ = ()

    @abstractmethod
    async def send(self, value: SendType) -> None:
        """Attempt to send an object through the channel, blocking if necessary.

        Args:
            value (object): The object to send.

        Raises:
            trio.BrokenResourceError: if something has gone wrong, and the
                channel is broken. For example, you may get this if the receiver
                has already been closed.
            trio.ClosedResourceError: if you previously closed this
                :class:`SendChannel` object, or if another task closes it while
                :meth:`send` is running.
            trio.BusyResourceError: some channels allow multiple tasks to call
                `send` at the same time, but others don't. If you try to call
                `send` simultaneously from multiple tasks on a channel that
                doesn't support it, then you can get `~trio.BusyResourceError`.

        """


class ReceiveChannel(AsyncResource, Generic[ReceiveType]):
    """A standard interface for receiving Python objects from some sender.

    You can iterate over a :class:`ReceiveChannel` using an ``async for``
    loop::

        async for value in receive_channel:
            ...

    This is equivalent to calling :meth:`receive` repeatedly. The loop exits
    without error when `receive` raises `~trio.EndOfChannel`.

    `ReceiveChannel` objects also implement the `AsyncResource` interface, so
    they can be closed by calling `~AsyncResource.aclose` or using an ``async
    with`` block.

    If you want to receive raw bytes rather than Python objects, see
    `ReceiveStream`.

    """

    __slots__ = ()

    @abstractmethod
    async def receive(self) -> ReceiveType:
        """Attempt to receive an incoming object, blocking if necessary.

        Returns:
            object: Whatever object was received.

        Raises:
            trio.EndOfChannel: if the sender has been closed cleanly, and no
                more objects are coming. This is not an error condition.
            trio.ClosedResourceError: if you previously closed this
                :class:`ReceiveChannel` object.
            trio.BrokenResourceError: if something has gone wrong, and the
                channel is broken.
            trio.BusyResourceError: some channels allow multiple tasks to call
                `receive` at the same time, but others don't. If you try to call
                `receive` simultaneously from multiple tasks on a channel that
                doesn't support it, then you can get `~trio.BusyResourceError`.

        """

    def __aiter__(self):
        return self

    async def __anext__(self) -> ReceiveType:
        try:
            return await self.receive()
        except trio.EndOfChannel:
            raise StopAsyncIteration


class Channel(SendChannel[T], ReceiveChannel[T]):
    """A standard interface for interacting with bidirectional channels.

    A `Channel` is an object that implements both the `SendChannel` and
    `ReceiveChannel` interfaces, so you can both send and receive objects.

    """
asq-env/lib/python3.9/site-packages/trio/_channel.py (new file, 386 lines)
@@ -0,0 +1,386 @@
from collections import deque, OrderedDict
from math import inf

import attr
from outcome import Error, Value

from .abc import SendChannel, ReceiveChannel, Channel
from ._util import generic_function, NoPublicConstructor

import trio
from ._core import enable_ki_protection


@generic_function
def open_memory_channel(max_buffer_size):
    """Open a channel for passing objects between tasks within a process.

    Memory channels are lightweight, cheap to allocate, and entirely
    in-memory. They don't involve any operating-system resources, or any kind
    of serialization. They just pass Python objects directly between tasks
    (with a possible stop in an internal buffer along the way).

    Channel objects can be closed by calling `~trio.abc.AsyncResource.aclose`
    or using ``async with``. They are *not* automatically closed when garbage
    collected. Closing memory channels isn't mandatory, but it is generally a
    good idea, because it helps avoid situations where tasks get stuck waiting
    on a channel when there's no-one on the other side. See
    :ref:`channel-shutdown` for details.

    Memory channel operations are all atomic with respect to cancellation:
    either `~trio.abc.ReceiveChannel.receive` will successfully return an
    object, or it will raise :exc:`Cancelled` while leaving the channel
    unchanged.

    Args:
        max_buffer_size (int or math.inf): The maximum number of items that can
            be buffered in the channel before :meth:`~trio.abc.SendChannel.send`
            blocks. Choosing a sensible value here is important to ensure that
            backpressure is communicated promptly and to avoid unnecessary
            latency; see :ref:`channel-buffering` for more details. If in
            doubt, use 0.

    Returns:
        A pair ``(send_channel, receive_channel)``. If you have
        trouble remembering which order these go in, remember: data
        flows from left → right.

    In addition to the standard channel methods, all memory channel objects
    provide a ``statistics()`` method, which returns an object with the
    following fields:

    * ``current_buffer_used``: The number of items currently stored in the
      channel buffer.
    * ``max_buffer_size``: The maximum number of items allowed in the buffer,
      as passed to :func:`open_memory_channel`.
    * ``open_send_channels``: The number of open
      :class:`MemorySendChannel` endpoints pointing to this channel.
      Initially 1, but can be increased by
      :meth:`MemorySendChannel.clone`.
    * ``open_receive_channels``: Likewise, but for open
      :class:`MemoryReceiveChannel` endpoints.
    * ``tasks_waiting_send``: The number of tasks blocked in ``send`` on this
      channel (summing over all clones).
    * ``tasks_waiting_receive``: The number of tasks blocked in ``receive`` on
      this channel (summing over all clones).

    """
    if max_buffer_size != inf and not isinstance(max_buffer_size, int):
        raise TypeError("max_buffer_size must be an integer or math.inf")
    if max_buffer_size < 0:
        raise ValueError("max_buffer_size must be >= 0")
    state = MemoryChannelState(max_buffer_size)
    return (
        MemorySendChannel._create(state),
        MemoryReceiveChannel._create(state),
    )

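# Illustrative sketch (not part of the vendored file): a producer/consumer
# pair built on open_memory_channel(). Buffer size 0 means every send waits
# for a matching receive, so backpressure is immediate.
async def _producer_consumer_example():
    send_channel, receive_channel = open_memory_channel(0)

    async def producer():
        async with send_channel:
            for i in range(3):
                await send_channel.send(i)

    async def consumer():
        async with receive_channel:
            async for value in receive_channel:  # stops at EndOfChannel
                print("got", value)

    async with trio.open_nursery() as nursery:
        nursery.start_soon(producer)
        nursery.start_soon(consumer)

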
@attr.s(frozen=True, slots=True)
class MemoryChannelStats:
    current_buffer_used = attr.ib()
    max_buffer_size = attr.ib()
    open_send_channels = attr.ib()
    open_receive_channels = attr.ib()
    tasks_waiting_send = attr.ib()
    tasks_waiting_receive = attr.ib()


@attr.s(slots=True)
class MemoryChannelState:
    max_buffer_size = attr.ib()
    data = attr.ib(factory=deque)
    # Counts of open endpoints using this state
    open_send_channels = attr.ib(default=0)
    open_receive_channels = attr.ib(default=0)
    # {task: value}
    send_tasks = attr.ib(factory=OrderedDict)
    # {task: None}
    receive_tasks = attr.ib(factory=OrderedDict)

    def statistics(self):
        return MemoryChannelStats(
            current_buffer_used=len(self.data),
            max_buffer_size=self.max_buffer_size,
            open_send_channels=self.open_send_channels,
            open_receive_channels=self.open_receive_channels,
            tasks_waiting_send=len(self.send_tasks),
            tasks_waiting_receive=len(self.receive_tasks),
        )

@attr.s(eq=False, repr=False)
class MemorySendChannel(SendChannel, metaclass=NoPublicConstructor):
    _state = attr.ib()
    _closed = attr.ib(default=False)
    # This is just the tasks waiting on *this* object. As compared to
    # self._state.send_tasks, which includes tasks from this object and
    # all clones.
    _tasks = attr.ib(factory=set)

    def __attrs_post_init__(self):
        self._state.open_send_channels += 1

    def __repr__(self):
        return "<send channel at {:#x}, using buffer at {:#x}>".format(
            id(self), id(self._state)
        )

    def statistics(self):
        # XX should we also report statistics specific to this object?
        return self._state.statistics()

    @enable_ki_protection
    def send_nowait(self, value):
        """Like `~trio.abc.SendChannel.send`, but if the channel's buffer is
        full, raises `WouldBlock` instead of blocking.

        """
        if self._closed:
            raise trio.ClosedResourceError
        if self._state.open_receive_channels == 0:
            raise trio.BrokenResourceError
        if self._state.receive_tasks:
            assert not self._state.data
            task, _ = self._state.receive_tasks.popitem(last=False)
            task.custom_sleep_data._tasks.remove(task)
            trio.lowlevel.reschedule(task, Value(value))
        elif len(self._state.data) < self._state.max_buffer_size:
            self._state.data.append(value)
        else:
            raise trio.WouldBlock

    @enable_ki_protection
    async def send(self, value):
        """See `SendChannel.send <trio.abc.SendChannel.send>`.

        Memory channels allow multiple tasks to call `send` at the same time.

        """
        await trio.lowlevel.checkpoint_if_cancelled()
        try:
            self.send_nowait(value)
        except trio.WouldBlock:
            pass
        else:
            await trio.lowlevel.cancel_shielded_checkpoint()
            return

        task = trio.lowlevel.current_task()
        self._tasks.add(task)
        self._state.send_tasks[task] = value
        task.custom_sleep_data = self

        def abort_fn(_):
            self._tasks.remove(task)
            del self._state.send_tasks[task]
            return trio.lowlevel.Abort.SUCCEEDED

        await trio.lowlevel.wait_task_rescheduled(abort_fn)

    @enable_ki_protection
    def clone(self):
        """Clone this send channel object.

        This returns a new `MemorySendChannel` object, which acts as a
        duplicate of the original: sending on the new object does exactly the
        same thing as sending on the old object. (If you're familiar with
        `os.dup`, then this is a similar idea.)

        However, closing one of the objects does not close the other, and
        receivers don't get `EndOfChannel` until *all* clones have been
        closed.

        This is useful for communication patterns that involve multiple
        producers all sending objects to the same destination. If you give
        each producer its own clone of the `MemorySendChannel`, and then make
        sure to close each `MemorySendChannel` when it's finished, receivers
        will automatically get notified when all producers are finished. See
        :ref:`channel-mpmc` for examples.

        Raises:
            trio.ClosedResourceError: if you already closed this
                `MemorySendChannel` object.

        """
        if self._closed:
            raise trio.ClosedResourceError
        return MemorySendChannel._create(self._state)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    @enable_ki_protection
    def close(self):
        """Close this send channel object synchronously.

        All channel objects have an asynchronous `~.AsyncResource.aclose` method.
        Memory channels can also be closed synchronously. This has the same
        effect on the channel and other tasks using it, but `close` is not a
        trio checkpoint. This simplifies cleaning up in cancelled tasks.

        Using ``with send_channel:`` will close the channel object on leaving
        the with block.

        """
        if self._closed:
            return
        self._closed = True
        for task in self._tasks:
            trio.lowlevel.reschedule(task, Error(trio.ClosedResourceError()))
            del self._state.send_tasks[task]
        self._tasks.clear()
        self._state.open_send_channels -= 1
        if self._state.open_send_channels == 0:
            assert not self._state.send_tasks
            for task in self._state.receive_tasks:
                task.custom_sleep_data._tasks.remove(task)
                trio.lowlevel.reschedule(task, Error(trio.EndOfChannel()))
            self._state.receive_tasks.clear()

    @enable_ki_protection
    async def aclose(self):
        self.close()
        await trio.lowlevel.checkpoint()

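# Illustrative sketch (not part of the vendored file): the multi-producer
# pattern described in MemorySendChannel.clone(). Each producer owns its own
# clone; the receiver sees EndOfChannel only after every clone is closed.
async def _fan_in_example():
    send_channel, receive_channel = open_memory_channel(0)

    async def producer(send_clone, label):
        async with send_clone:
            await send_clone.send(f"hello from producer {label}")

    async with trio.open_nursery() as nursery:
        async with send_channel:
            for i in range(3):
                nursery.start_soon(producer, send_channel.clone(), i)
        async with receive_channel:
            async for message in receive_channel:
                print(message)

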
@attr.s(eq=False, repr=False)
class MemoryReceiveChannel(ReceiveChannel, metaclass=NoPublicConstructor):
    _state = attr.ib()
    _closed = attr.ib(default=False)
    _tasks = attr.ib(factory=set)

    def __attrs_post_init__(self):
        self._state.open_receive_channels += 1

    def statistics(self):
        return self._state.statistics()

    def __repr__(self):
        return "<receive channel at {:#x}, using buffer at {:#x}>".format(
            id(self), id(self._state)
        )

    @enable_ki_protection
    def receive_nowait(self):
        """Like `~trio.abc.ReceiveChannel.receive`, but if there's nothing
        ready to receive, raises `WouldBlock` instead of blocking.

        """
        if self._closed:
            raise trio.ClosedResourceError
        if self._state.send_tasks:
            task, value = self._state.send_tasks.popitem(last=False)
            task.custom_sleep_data._tasks.remove(task)
            trio.lowlevel.reschedule(task)
            self._state.data.append(value)
            # Fall through
        if self._state.data:
            return self._state.data.popleft()
        if not self._state.open_send_channels:
            raise trio.EndOfChannel
        raise trio.WouldBlock

    @enable_ki_protection
    async def receive(self):
        """See `ReceiveChannel.receive <trio.abc.ReceiveChannel.receive>`.

        Memory channels allow multiple tasks to call `receive` at the same
        time. The first task will get the first item sent, the second task
        will get the second item sent, and so on.

        """
        await trio.lowlevel.checkpoint_if_cancelled()
        try:
            value = self.receive_nowait()
        except trio.WouldBlock:
            pass
        else:
            await trio.lowlevel.cancel_shielded_checkpoint()
            return value

        task = trio.lowlevel.current_task()
        self._tasks.add(task)
        self._state.receive_tasks[task] = None
        task.custom_sleep_data = self

        def abort_fn(_):
            self._tasks.remove(task)
            del self._state.receive_tasks[task]
            return trio.lowlevel.Abort.SUCCEEDED

        return await trio.lowlevel.wait_task_rescheduled(abort_fn)

    @enable_ki_protection
    def clone(self):
        """Clone this receive channel object.

        This returns a new `MemoryReceiveChannel` object, which acts as a
        duplicate of the original: receiving on the new object does exactly
        the same thing as receiving on the old object.

        However, closing one of the objects does not close the other, and the
        underlying channel is not closed until all clones are closed. (If
        you're familiar with `os.dup`, then this is a similar idea.)

        This is useful for communication patterns that involve multiple
        consumers all receiving objects from the same underlying channel. See
        :ref:`channel-mpmc` for examples.

        .. warning:: The clones all share the same underlying channel.
           Whenever a clone :meth:`receive`\\s a value, it is removed from the
           channel and the other clones do *not* receive that value. If you
           want to send multiple copies of the same stream of values to
           multiple destinations, like :func:`itertools.tee`, then you need to
           find some other solution; this method does *not* do that.

        Raises:
            trio.ClosedResourceError: if you already closed this
                `MemoryReceiveChannel` object.

        """
        if self._closed:
            raise trio.ClosedResourceError
        return MemoryReceiveChannel._create(self._state)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    @enable_ki_protection
    def close(self):
        """Close this receive channel object synchronously.

        All channel objects have an asynchronous `~.AsyncResource.aclose` method.
        Memory channels can also be closed synchronously. This has the same
        effect on the channel and other tasks using it, but `close` is not a
        trio checkpoint. This simplifies cleaning up in cancelled tasks.

        Using ``with receive_channel:`` will close the channel object on
        leaving the with block.

        """
        if self._closed:
            return
        self._closed = True
        for task in self._tasks:
            trio.lowlevel.reschedule(task, Error(trio.ClosedResourceError()))
            del self._state.receive_tasks[task]
        self._tasks.clear()
        self._state.open_receive_channels -= 1
        if self._state.open_receive_channels == 0:
            assert not self._state.receive_tasks
            for task in self._state.send_tasks:
                task.custom_sleep_data._tasks.remove(task)
                trio.lowlevel.reschedule(task, Error(trio.BrokenResourceError()))
            self._state.send_tasks.clear()
            self._state.data.clear()

    @enable_ki_protection
    async def aclose(self):
        self.close()
        await trio.lowlevel.checkpoint()
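
A sketch of the multi-consumer pattern described in
``MemoryReceiveChannel.clone()`` (not part of the commit; the worker-pool
names are made up)::

    async def worker(receive_clone, label):
        async with receive_clone:
            async for job in receive_clone:   # each job goes to exactly one worker
                print("worker", label, "handled", job)

    async def pool():
        send_channel, receive_channel = open_memory_channel(0)
        async with trio.open_nursery() as nursery:
            async with receive_channel:
                for i in range(2):
                    nursery.start_soon(worker, receive_channel.clone(), i)
            async with send_channel:
                for job in range(6):
                    await send_channel.send(job)
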
asq-env/lib/python3.9/site-packages/trio/_core/__init__.py (new file, 92 lines)
@@ -0,0 +1,92 @@
"""
This namespace represents the core functionality that has to be built-in
and deal with private internal data structures. Things in this namespace
are publicly available in either trio, trio.lowlevel, or trio.testing.
"""

import sys

from ._exceptions import (
    TrioInternalError,
    RunFinishedError,
    WouldBlock,
    Cancelled,
    BusyResourceError,
    ClosedResourceError,
    BrokenResourceError,
    EndOfChannel,
)

from ._multierror import MultiError

from ._ki import (
    enable_ki_protection,
    disable_ki_protection,
    currently_ki_protected,
)

# Imports that always exist
from ._run import (
    Task,
    CancelScope,
    run,
    open_nursery,
    checkpoint,
    current_task,
    current_effective_deadline,
    checkpoint_if_cancelled,
    TASK_STATUS_IGNORED,
    current_statistics,
    current_trio_token,
    reschedule,
    remove_instrument,
    add_instrument,
    current_clock,
    current_root_task,
    spawn_system_task,
    current_time,
    wait_all_tasks_blocked,
    wait_readable,
    wait_writable,
    notify_closing,
    Nursery,
    start_guest_run,
)

# Has to come after _run to resolve a circular import
from ._traps import (
    cancel_shielded_checkpoint,
    Abort,
    wait_task_rescheduled,
    temporarily_detach_coroutine_object,
    permanently_detach_coroutine_object,
    reattach_detached_coroutine_object,
)

from ._entry_queue import TrioToken

from ._parking_lot import ParkingLot

from ._unbounded_queue import UnboundedQueue

from ._local import RunVar

from ._thread_cache import start_thread_soon

from ._mock_clock import MockClock

# Windows imports
if sys.platform == "win32":
    from ._run import (
        monitor_completion_key,
        current_iocp,
        register_with_iocp,
        wait_overlapped,
        write_overlapped,
        readinto_overlapped,
    )
# Kqueue imports
elif sys.platform != "linux" and sys.platform != "win32":
    from ._run import current_kqueue, monitor_kevent, wait_kevent

del sys  # It would be better to import sys as _sys, but mypy does not understand it
(25 binary files not shown)
asq-env/lib/python3.9/site-packages/trio/_core/_asyncgens.py (new file, 193 lines)
@@ -0,0 +1,193 @@
import attr
import logging
import sys
import warnings
import weakref

from .._util import name_asyncgen
from . import _run
from .. import _core

# Used to log exceptions in async generator finalizers
ASYNCGEN_LOGGER = logging.getLogger("trio.async_generator_errors")


@attr.s(eq=False, slots=True)
class AsyncGenerators:
    # Async generators are added to this set when first iterated. Any
    # left after the main task exits will be closed before trio.run()
    # returns. During most of the run, this is a WeakSet so GC works.
    # During shutdown, when we're finalizing all the remaining
    # asyncgens after the system nursery has been closed, it's a
    # regular set so we don't have to deal with GC firing at
    # unexpected times.
    alive = attr.ib(factory=weakref.WeakSet)

    # This collects async generators that get garbage collected during
    # the one-tick window between the system nursery closing and the
    # init task starting end-of-run asyncgen finalization.
    trailing_needs_finalize = attr.ib(factory=set)

    prev_hooks = attr.ib(init=False)

    def install_hooks(self, runner):
        def firstiter(agen):
            if hasattr(_run.GLOBAL_RUN_CONTEXT, "task"):
                self.alive.add(agen)
            else:
                # An async generator first iterated outside of a Trio
                # task doesn't belong to Trio. Probably we're in guest
                # mode and the async generator belongs to our host.
                # The locals dictionary is the only good place to
                # remember this fact, at least until
                # https://bugs.python.org/issue40916 is implemented.
                agen.ag_frame.f_locals["@trio_foreign_asyncgen"] = True
                if self.prev_hooks.firstiter is not None:
                    self.prev_hooks.firstiter(agen)

        def finalize_in_trio_context(agen, agen_name):
            try:
                runner.spawn_system_task(
                    self._finalize_one,
                    agen,
                    agen_name,
                    name=f"close asyncgen {agen_name} (abandoned)",
                )
            except RuntimeError:
                # There is a one-tick window where the system nursery
                # is closed but the init task hasn't yet made
                # self.asyncgens a strong set to disable GC. We seem to
                # have hit it.
                self.trailing_needs_finalize.add(agen)

        def finalizer(agen):
            agen_name = name_asyncgen(agen)
            try:
                is_ours = not agen.ag_frame.f_locals.get("@trio_foreign_asyncgen")
            except AttributeError:  # pragma: no cover
                is_ours = True

            if is_ours:
                runner.entry_queue.run_sync_soon(
                    finalize_in_trio_context, agen, agen_name
                )

                # Do this last, because it might raise an exception
                # depending on the user's warnings filter. (That
                # exception will be printed to the terminal and
                # ignored, since we're running in GC context.)
                warnings.warn(
                    f"Async generator {agen_name!r} was garbage collected before it "
                    f"had been exhausted. Surround its use in 'async with "
                    f"aclosing(...):' to ensure that it gets cleaned up as soon as "
                    f"you're done using it.",
                    ResourceWarning,
                    stacklevel=2,
                    source=agen,
                )
            else:
                # Not ours -> forward to the host loop's async generator finalizer
                if self.prev_hooks.finalizer is not None:
                    self.prev_hooks.finalizer(agen)
                else:
                    # Host has no finalizer. Reimplement the default
                    # Python behavior with no hooks installed: throw in
                    # GeneratorExit, step once, raise RuntimeError if
                    # it doesn't exit.
                    closer = agen.aclose()
                    try:
                        # If the next thing is a yield, this will raise RuntimeError
                        # which we allow to propagate
                        closer.send(None)
                    except StopIteration:
                        pass
                    else:
                        # If the next thing is an await, we get here. Give a nicer
                        # error than the default "async generator ignored GeneratorExit"
                        raise RuntimeError(
                            f"Non-Trio async generator {agen_name!r} awaited something "
                            f"during finalization; install a finalization hook to "
                            f"support this, or wrap it in 'async with aclosing(...):'"
                        )

        self.prev_hooks = sys.get_asyncgen_hooks()
        sys.set_asyncgen_hooks(firstiter=firstiter, finalizer=finalizer)

    async def finalize_remaining(self, runner):
        # This is called from init after shutting down the system nursery.
        # The only tasks running at this point are init and
        # the run_sync_soon task, and since the system nursery is closed,
        # there's no way for user code to spawn more.
        assert _core.current_task() is runner.init_task
        assert len(runner.tasks) == 2

        # To make async generator finalization easier to reason
        # about, we'll shut down asyncgen garbage collection by turning
        # the alive WeakSet into a regular set.
        self.alive = set(self.alive)

        # Process all pending run_sync_soon callbacks, in case one of
        # them was an asyncgen finalizer that snuck in under the wire.
        runner.entry_queue.run_sync_soon(runner.reschedule, runner.init_task)
        await _core.wait_task_rescheduled(
            lambda _: _core.Abort.FAILED  # pragma: no cover
        )
        self.alive.update(self.trailing_needs_finalize)
        self.trailing_needs_finalize.clear()

        # None of the still-living tasks use async generators, so
        # every async generator must be suspended at a yield point --
        # there's no one to be doing the iteration. That's good,
        # because aclose() only works on an asyncgen that's suspended
        # at a yield point. (If it's suspended at an event loop trap,
        # because someone is in the middle of iterating it, then you
        # get a RuntimeError on 3.8+, and a nasty surprise on earlier
        # versions due to https://bugs.python.org/issue32526.)
        #
        # However, once we start aclose() of one async generator, it
        # might start fetching the next value from another, thus
        # preventing us from closing that other (at least until
        # aclose() of the first one is complete). This constraint
        # effectively requires us to finalize the remaining asyncgens
        # in arbitrary order, rather than doing all of them at the
        # same time. On 3.8+ we could defer any generator with
        # ag_running=True to a later batch, but that only catches
        # the case where our aclose() starts after the user's
        # asend()/etc. If our aclose() starts first, then the
        # user's asend()/etc will raise RuntimeError, since they're
        # probably not checking ag_running.
        #
        # It might be possible to allow some parallelized cleanup if
        # we can determine that a certain set of asyncgens have no
        # interdependencies, using gc.get_referents() and such.
        # But just doing one at a time will typically work well enough
        # (since each aclose() executes in a cancelled scope) and
        # is much easier to reason about.

        # It's possible that that cleanup code will itself create
        # more async generators, so we iterate repeatedly until
        # all are gone.
        while self.alive:
            batch = self.alive
            self.alive = set()
            for agen in batch:
                await self._finalize_one(agen, name_asyncgen(agen))

    def close(self):
        sys.set_asyncgen_hooks(*self.prev_hooks)

    async def _finalize_one(self, agen, name):
        try:
            # This shield ensures that finalize_asyncgen never exits
            # with an exception, not even a Cancelled. The inside
            # is cancelled so there's no deadlock risk.
            with _core.CancelScope(shield=True) as cancel_scope:
                cancel_scope.cancel()
                await agen.aclose()
        except BaseException:
            ASYNCGEN_LOGGER.exception(
                "Exception ignored during finalization of async generator %r -- "
                "surround your use of the generator in 'async with aclosing(...):' "
                "to raise exceptions like this in the context where they're generated",
                name,
            )
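
A minimal sketch of the cleanup pattern the warning above recommends (not part
of the commit). ``aclosing`` is assumed to come from the ``async_generator``
package, since ``contextlib.aclosing`` only exists on Python 3.10+; ``ticker``
is a made-up generator::

    import trio
    from async_generator import aclosing

    async def ticker():
        while True:
            yield trio.current_time()
            await trio.sleep(1)

    async def main():
        async with aclosing(ticker()) as agen:  # guarantees aclose() runs here,
            async for t in agen:                # not later in a GC hook
                print(t)
                break

    trio.run(main)
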
asq-env/lib/python3.9/site-packages/trio/_core/_entry_queue.py (new file, 195 lines)
@@ -0,0 +1,195 @@
from collections import deque
import threading

import attr

from .. import _core
from .._util import NoPublicConstructor
from ._wakeup_socketpair import WakeupSocketpair


@attr.s(slots=True)
class EntryQueue:
    # This used to use a queue.Queue, but that was broken, because Queues are
    # implemented in Python, and not reentrant -- so it was thread-safe, but
    # not signal-safe. deque is implemented in C, so each operation is atomic
    # WRT threads (and this is guaranteed in the docs), AND each operation is
    # atomic WRT signal delivery (signal handlers can run on either side, but
    # not *during* a deque operation). dict makes similar guarantees - and on
    # CPython 3.6 and PyPy, it's even ordered!
    queue = attr.ib(factory=deque)
    idempotent_queue = attr.ib(factory=dict)

    wakeup = attr.ib(factory=WakeupSocketpair)
    done = attr.ib(default=False)
    # Must be a reentrant lock, because it's acquired from signal handlers.
    # RLock is signal-safe as of cpython 3.2. NB that this does mean that the
    # lock is effectively *disabled* when we enter from signal context. The
    # way we use the lock this is OK though, because when
    # run_sync_soon is called from a signal it's atomic WRT the
    # main thread -- it just might happen at some inconvenient place. But if
    # you look at the one place where the main thread holds the lock, it's
    # just to make 1 assignment, so that's atomic WRT a signal anyway.
    lock = attr.ib(factory=threading.RLock)

    async def task(self):
        assert _core.currently_ki_protected()
        # RLock has two implementations: a signal-safe version in _thread, and
        # a signal-UNsafe version in threading. We need the signal safe
        # version. Python 3.2 and later should always use this anyway, but,
        # since the symptoms if this goes wrong are just "weird rare
        # deadlocks", then let's make a little check.
        # See:
        # https://bugs.python.org/issue13697#msg237140
        assert self.lock.__class__.__module__ == "_thread"

        def run_cb(job):
            # We run this with KI protection enabled; it's the callback's
            # job to disable it if it wants it disabled. Exceptions are
            # treated like system task exceptions (i.e., converted into
            # TrioInternalError and cause everything to shut down).
            sync_fn, args = job
            try:
                sync_fn(*args)
            except BaseException as exc:

                async def kill_everything(exc):
                    raise exc

                try:
                    _core.spawn_system_task(kill_everything, exc)
                except RuntimeError:
                    # We're quite late in the shutdown process and the
                    # system nursery is already closed.
                    # TODO(2020-06): this is a gross hack and should
                    # be fixed soon when we address #1607.
                    _core.current_task().parent_nursery.start_soon(kill_everything, exc)

            return True

        # This has to be carefully written to be safe in the face of new items
        # being queued while we iterate, and to do a bounded amount of work on
        # each pass:
        def run_all_bounded():
            for _ in range(len(self.queue)):
                run_cb(self.queue.popleft())
            for job in list(self.idempotent_queue):
                del self.idempotent_queue[job]
                run_cb(job)

        try:
            while True:
                run_all_bounded()
                if not self.queue and not self.idempotent_queue:
                    await self.wakeup.wait_woken()
                else:
                    await _core.checkpoint()
        except _core.Cancelled:
            # Keep the work done with this lock held as minimal as possible,
            # because it doesn't protect us against concurrent signal delivery
            # (see the comment above). Notice that this code would still be
            # correct if written like:
            #   self.done = True
            #   with self.lock:
            #       pass
            # because all we want is to force run_sync_soon
            # to either be completely before or completely after the write to
|
||||
# done. That's why we don't need the lock to protect
|
||||
# against signal handlers.
|
||||
with self.lock:
|
||||
self.done = True
|
||||
# No more jobs will be submitted, so just clear out any residual
|
||||
# ones:
|
||||
run_all_bounded()
|
||||
assert not self.queue
|
||||
assert not self.idempotent_queue
|
||||
|
||||
def close(self):
|
||||
self.wakeup.close()
|
||||
|
||||
def size(self):
|
||||
return len(self.queue) + len(self.idempotent_queue)
|
||||
|
||||
def run_sync_soon(self, sync_fn, *args, idempotent=False):
|
||||
with self.lock:
|
||||
if self.done:
|
||||
raise _core.RunFinishedError("run() has exited")
|
||||
# We have to hold the lock all the way through here, because
|
||||
# otherwise the main thread might exit *while* we're doing these
|
||||
# calls, and then our queue item might not be processed, or the
|
||||
# wakeup call might trigger an OSError b/c the IO manager has
|
||||
# already been shut down.
|
||||
if idempotent:
|
||||
self.idempotent_queue[(sync_fn, args)] = None
|
||||
else:
|
||||
self.queue.append((sync_fn, args))
|
||||
self.wakeup.wakeup_thread_and_signal_safe()
|
||||
|
||||
|
||||
@attr.s(eq=False, hash=False, slots=True)
|
||||
class TrioToken(metaclass=NoPublicConstructor):
|
||||
"""An opaque object representing a single call to :func:`trio.run`.
|
||||
|
||||
It has no public constructor; instead, see :func:`current_trio_token`.
|
||||
|
||||
This object has two uses:
|
||||
|
||||
1. It lets you re-enter the Trio run loop from external threads or signal
|
||||
handlers. This is the low-level primitive that :func:`trio.to_thread`
|
||||
and `trio.from_thread` use to communicate with worker threads, that
|
||||
`trio.open_signal_receiver` uses to receive notifications about
|
||||
signals, and so forth.
|
||||
|
||||
2. Each call to :func:`trio.run` has exactly one associated
|
||||
:class:`TrioToken` object, so you can use it to identify a particular
|
||||
call.
|
||||
|
||||
"""
|
||||
|
||||
_reentry_queue = attr.ib()
|
||||
|
||||
def run_sync_soon(self, sync_fn, *args, idempotent=False):
|
||||
"""Schedule a call to ``sync_fn(*args)`` to occur in the context of a
|
||||
Trio task.
|
||||
|
||||
This is safe to call from the main thread, from other threads, and
|
||||
from signal handlers. This is the fundamental primitive used to
|
||||
re-enter the Trio run loop from outside of it.
|
||||
|
||||
The call will happen "soon", but there's no guarantee about exactly
|
||||
when, and no mechanism provided for finding out when it's happened.
|
||||
If you need this, you'll have to build your own.
|
||||
|
||||
The call is effectively run as part of a system task (see
|
||||
:func:`~trio.lowlevel.spawn_system_task`). In particular this means
|
||||
that:
|
||||
|
||||
* :exc:`KeyboardInterrupt` protection is *enabled* by default; if
|
||||
you want ``sync_fn`` to be interruptible by control-C, then you
|
||||
need to use :func:`~trio.lowlevel.disable_ki_protection`
|
||||
explicitly.
|
||||
|
||||
* If ``sync_fn`` raises an exception, then it's converted into a
|
||||
:exc:`~trio.TrioInternalError` and *all* tasks are cancelled. You
|
||||
should be careful that ``sync_fn`` doesn't crash.
|
||||
|
||||
All calls with ``idempotent=False`` are processed in strict
|
||||
first-in first-out order.
|
||||
|
||||
If ``idempotent=True``, then ``sync_fn`` and ``args`` must be
|
||||
hashable, and Trio will make a best-effort attempt to discard any
|
||||
call submission which is equal to an already-pending call. Trio
|
||||
will process these in first-in first-out order.
|
||||
|
||||
Any ordering guarantees apply separately to ``idempotent=False``
|
||||
and ``idempotent=True`` calls; there's no rule for how calls in the
|
||||
different categories are ordered with respect to each other.
|
||||
|
||||
:raises trio.RunFinishedError:
|
||||
if the associated call to :func:`trio.run`
|
||||
has already exited. (Any call that *doesn't* raise this error
|
||||
is guaranteed to be fully processed before :func:`trio.run`
|
||||
exits.)
|
||||
|
||||
"""
|
||||
self._reentry_queue.run_sync_soon(sync_fn, *args, idempotent=idempotent)
|
||||
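To make the run_sync_soon docstring above concrete, here is a minimal sketch (names are illustrative) of re-entering the Trio run loop from a plain worker thread via the token:

import threading
import trio

def worker(token, results):
    # Runs in a plain thread: schedule a callback back inside the run loop.
    # The callback runs with KI protection enabled and must not raise.
    token.run_sync_soon(results.append, "hello from a thread")

async def main():
    results = []
    token = trio.lowlevel.current_trio_token()
    t = threading.Thread(target=worker, args=(token, results))
    t.start()
    t.join()  # fine for a tiny demo; real code would use trio.to_thread
    # Let the entry-queue task process the queued callback.
    await trio.sleep(0.1)
    print(results)

trio.run(main)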
114
asq-env/lib/python3.9/site-packages/trio/_core/_exceptions.py
Normal file
@@ -0,0 +1,114 @@
import attr

from trio._util import NoPublicConstructor


class TrioInternalError(Exception):
    """Raised by :func:`run` if we encounter a bug in Trio, or (possibly) a
    misuse of one of the low-level :mod:`trio.lowlevel` APIs.

    This should never happen! If you get this error, please file a bug.

    Unfortunately, if you get this error it also means that all bets are off –
    Trio doesn't know what is going on and its normal invariants may be void.
    (For example, we might have "lost track" of a task. Or lost track of all
    tasks.) Again, though, this shouldn't happen.

    """


class RunFinishedError(RuntimeError):
    """Raised by `trio.from_thread.run` and similar functions if the
    corresponding call to :func:`trio.run` has already finished.

    """


class WouldBlock(Exception):
    """Raised by ``X_nowait`` functions if ``X`` would block."""


class Cancelled(BaseException, metaclass=NoPublicConstructor):
    """Raised by blocking calls if the surrounding scope has been cancelled.

    You should let this exception propagate, to be caught by the relevant
    cancel scope. To remind you of this, it inherits from :exc:`BaseException`
    instead of :exc:`Exception`, just like :exc:`KeyboardInterrupt` and
    :exc:`SystemExit` do. This means that if you write something like::

        try:
            ...
        except Exception:
            ...

    then this *won't* catch a :exc:`Cancelled` exception.

    You cannot raise :exc:`Cancelled` yourself. Attempting to do so
    will produce a :exc:`TypeError`. Use :meth:`cancel_scope.cancel()
    <trio.CancelScope.cancel>` instead.

    .. note::

       In the US it's also common to see this word spelled "canceled", with
       only one "l". This is a `recent
       <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=5&smoothing=3&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__
       and `US-specific
       <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=18&smoothing=3&share=&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__
       innovation, and even in the US both forms are still commonly used. So
       for consistency with the rest of the world and with "cancellation"
       (which always has two "l"s), Trio uses the two "l" spelling
       everywhere.

    """

    def __str__(self):
        return "Cancelled"


class BusyResourceError(Exception):
    """Raised when a task attempts to use a resource that some other task is
    already using, and this would lead to bugs and nonsense.

    For example, if two tasks try to send data through the same socket at the
    same time, Trio will raise :class:`BusyResourceError` instead of letting
    the data get scrambled.

    """


class ClosedResourceError(Exception):
    """Raised when attempting to use a resource after it has been closed.

    Note that "closed" here means that *your* code closed the resource,
    generally by calling a method with a name like ``close`` or ``aclose``, or
    by exiting a context manager. If a problem arises elsewhere – for example,
    because of a network failure, or because a remote peer closed their end of
    a connection – then that should be indicated by a different exception
    class, like :exc:`BrokenResourceError` or an :exc:`OSError` subclass.

    """


class BrokenResourceError(Exception):
    """Raised when an attempt to use a resource fails due to external
    circumstances.

    For example, you might get this if you try to send data on a stream where
    the remote side has already closed the connection.

    You *don't* get this error if *you* closed the resource – in that case you
    get :class:`ClosedResourceError`.

    This exception's ``__cause__`` attribute will often contain more
    information about the underlying error.

    """


class EndOfChannel(Exception):
    """Raised when trying to receive from a :class:`trio.abc.ReceiveChannel`
    that has no more data to receive.

    This is analogous to an "end-of-file" condition, but for channels.

    """
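A quick sketch of the point made in the Cancelled docstring above: a bare `except Exception` does not swallow cancellation, so cancel scopes keep working:

import trio

async def worker():
    try:
        await trio.sleep(10)
    except Exception:
        # Cancelled inherits from BaseException, so it is *not* caught here;
        # it propagates to the enclosing cancel scope as intended.
        print("never reached when the timeout fires")

async def main():
    with trio.move_on_after(0.1):
        await worker()
    print("scope exited cleanly after cancellation")

trio.run(main)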
47
asq-env/lib/python3.9/site-packages/trio/_core/_generated_instrumentation.py
Normal file
@@ -0,0 +1,47 @@
# ***********************************************************
# ******* WARNING: AUTOGENERATED! ALL EDITS WILL BE LOST ******
# *************************************************************
from ._run import GLOBAL_RUN_CONTEXT, _NO_SEND
from ._ki import LOCALS_KEY_KI_PROTECTION_ENABLED
from ._instrumentation import Instrument

# fmt: off


def add_instrument(instrument: Instrument) ->None:
    """Start instrumenting the current run loop with the given instrument.

    Args:
      instrument (trio.abc.Instrument): The instrument to activate.

    If ``instrument`` is already active, does nothing.

    """
    locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
    try:
        return GLOBAL_RUN_CONTEXT.runner.instruments.add_instrument(instrument)
    except AttributeError:
        raise RuntimeError("must be called from async context")


def remove_instrument(instrument: Instrument) ->None:
    """Stop instrumenting the current run loop with the given instrument.

    Args:
      instrument (trio.abc.Instrument): The instrument to de-activate.

    Raises:
      KeyError: if the instrument is not currently active. This could
          occur either because you never added it, or because you added it
          and then it raised an unhandled exception and was automatically
          deactivated.

    """
    locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
    try:
        return GLOBAL_RUN_CONTEXT.runner.instruments.remove_instrument(instrument)
    except AttributeError:
        raise RuntimeError("must be called from async context")


# fmt: on
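For context, here is a minimal sketch of installing and removing an instrument at runtime; the StepCounter class is illustrative, not part of Trio:

import trio

class StepCounter(trio.abc.Instrument):
    # Only overridden hooks get registered; hooks left at the abc default
    # add no overhead to the run loop.
    def __init__(self):
        self.steps = 0

    def before_task_step(self, task):
        self.steps += 1

async def main():
    counter = StepCounter()
    trio.lowlevel.add_instrument(counter)
    try:
        await trio.sleep(0)
    finally:
        trio.lowlevel.remove_instrument(counter)
    print("task steps observed:", counter.steps)

trio.run(main)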
35
asq-env/lib/python3.9/site-packages/trio/_core/_generated_io_epoll.py
Normal file
@@ -0,0 +1,35 @@
# ***********************************************************
# ******* WARNING: AUTOGENERATED! ALL EDITS WILL BE LOST ******
# *************************************************************
from ._run import GLOBAL_RUN_CONTEXT, _NO_SEND
from ._ki import LOCALS_KEY_KI_PROTECTION_ENABLED
from ._instrumentation import Instrument

# fmt: off


async def wait_readable(fd):
    locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
    try:
        return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_readable(fd)
    except AttributeError:
        raise RuntimeError("must be called from async context")


async def wait_writable(fd):
    locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
    try:
        return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_writable(fd)
    except AttributeError:
        raise RuntimeError("must be called from async context")


def notify_closing(fd):
    locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
    try:
        return GLOBAL_RUN_CONTEXT.runner.io_manager.notify_closing(fd)
    except AttributeError:
        raise RuntimeError("must be called from async context")


# fmt: on
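A small usage sketch for the fd-level wait_readable wrapper above (Unix-oriented; a pipe is just a convenient fd to wait on):

import os
import trio

async def main():
    r, w = os.pipe()

    async def reader():
        # Parks this task in the I/O manager's interest set until the read
        # end of the pipe has data.
        await trio.lowlevel.wait_readable(r)
        print(os.read(r, 1024))

    async with trio.open_nursery() as nursery:
        nursery.start_soon(reader)
        await trio.sleep(0.1)
        os.write(w, b"ping")

    os.close(r)
    os.close(w)

trio.run(main)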
59
asq-env/lib/python3.9/site-packages/trio/_core/_generated_io_kqueue.py
Normal file
@@ -0,0 +1,59 @@
|
||||
# ***********************************************************
|
||||
# ******* WARNING: AUTOGENERATED! ALL EDITS WILL BE LOST ******
|
||||
# *************************************************************
|
||||
from ._run import GLOBAL_RUN_CONTEXT, _NO_SEND
|
||||
from ._ki import LOCALS_KEY_KI_PROTECTION_ENABLED
|
||||
from ._instrumentation import Instrument
|
||||
|
||||
# fmt: off
|
||||
|
||||
|
||||
def current_kqueue():
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.io_manager.current_kqueue()
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def monitor_kevent(ident, filter):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.io_manager.monitor_kevent(ident, filter)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
async def wait_kevent(ident, filter, abort_func):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_kevent(ident, filter, abort_func)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
async def wait_readable(fd):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_readable(fd)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
async def wait_writable(fd):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_writable(fd)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def notify_closing(fd):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.io_manager.notify_closing(fd)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
# fmt: on
|
||||
83
asq-env/lib/python3.9/site-packages/trio/_core/_generated_io_windows.py
Normal file
@@ -0,0 +1,83 @@
|
||||
# ***********************************************************
|
||||
# ******* WARNING: AUTOGENERATED! ALL EDITS WILL BE LOST ******
|
||||
# *************************************************************
|
||||
from ._run import GLOBAL_RUN_CONTEXT, _NO_SEND
|
||||
from ._ki import LOCALS_KEY_KI_PROTECTION_ENABLED
|
||||
from ._instrumentation import Instrument
|
||||
|
||||
# fmt: off
|
||||
|
||||
|
||||
async def wait_readable(sock):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_readable(sock)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
async def wait_writable(sock):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_writable(sock)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def notify_closing(handle):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.io_manager.notify_closing(handle)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def register_with_iocp(handle):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.io_manager.register_with_iocp(handle)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
async def wait_overlapped(handle, lpOverlapped):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.io_manager.wait_overlapped(handle, lpOverlapped)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
async def write_overlapped(handle, data, file_offset=0):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.io_manager.write_overlapped(handle, data, file_offset)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
async def readinto_overlapped(handle, buffer, file_offset=0):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.io_manager.readinto_overlapped(handle, buffer, file_offset)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def current_iocp():
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.io_manager.current_iocp()
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def monitor_completion_key():
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.io_manager.monitor_completion_key()
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
# fmt: on
|
||||
237
asq-env/lib/python3.9/site-packages/trio/_core/_generated_run.py
Normal file
@@ -0,0 +1,237 @@
|
||||
# ***********************************************************
|
||||
# ******* WARNING: AUTOGENERATED! ALL EDITS WILL BE LOST ******
|
||||
# *************************************************************
|
||||
from ._run import GLOBAL_RUN_CONTEXT, _NO_SEND
|
||||
from ._ki import LOCALS_KEY_KI_PROTECTION_ENABLED
|
||||
from ._instrumentation import Instrument
|
||||
|
||||
# fmt: off
|
||||
|
||||
|
||||
def current_statistics():
|
||||
"""Returns an object containing run-loop-level debugging information.
|
||||
|
||||
Currently the following fields are defined:
|
||||
|
||||
* ``tasks_living`` (int): The number of tasks that have been spawned
|
||||
and not yet exited.
|
||||
* ``tasks_runnable`` (int): The number of tasks that are currently
|
||||
queued on the run queue (as opposed to blocked waiting for something
|
||||
to happen).
|
||||
* ``seconds_to_next_deadline`` (float): The time until the next
|
||||
pending cancel scope deadline. May be negative if the deadline has
|
||||
expired but we haven't yet processed cancellations. May be
|
||||
:data:`~math.inf` if there are no pending deadlines.
|
||||
* ``run_sync_soon_queue_size`` (int): The number of
|
||||
unprocessed callbacks queued via
|
||||
:meth:`trio.lowlevel.TrioToken.run_sync_soon`.
|
||||
* ``io_statistics`` (object): Some statistics from Trio's I/O
|
||||
backend. This always has an attribute ``backend`` which is a string
|
||||
naming which operating-system-specific I/O backend is in use; the
|
||||
other attributes vary between backends.
|
||||
|
||||
"""
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.current_statistics()
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def current_time():
|
||||
"""Returns the current time according to Trio's internal clock.
|
||||
|
||||
Returns:
|
||||
float: The current time.
|
||||
|
||||
Raises:
|
||||
RuntimeError: if not inside a call to :func:`trio.run`.
|
||||
|
||||
"""
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.current_time()
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def current_clock():
|
||||
"""Returns the current :class:`~trio.abc.Clock`."""
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.current_clock()
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def current_root_task():
|
||||
"""Returns the current root :class:`Task`.
|
||||
|
||||
This is the task that is the ultimate parent of all other tasks.
|
||||
|
||||
"""
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.current_root_task()
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def reschedule(task, next_send=_NO_SEND):
|
||||
"""Reschedule the given task with the given
|
||||
:class:`outcome.Outcome`.
|
||||
|
||||
See :func:`wait_task_rescheduled` for the gory details.
|
||||
|
||||
There must be exactly one call to :func:`reschedule` for every call to
|
||||
:func:`wait_task_rescheduled`. (And when counting, keep in mind that
|
||||
returning :data:`Abort.SUCCEEDED` from an abort callback is equivalent
|
||||
to calling :func:`reschedule` once.)
|
||||
|
||||
Args:
|
||||
task (trio.lowlevel.Task): the task to be rescheduled. Must be blocked
|
||||
in a call to :func:`wait_task_rescheduled`.
|
||||
next_send (outcome.Outcome): the value (or error) to return (or
|
||||
raise) from :func:`wait_task_rescheduled`.
|
||||
|
||||
"""
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.reschedule(task, next_send)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def spawn_system_task(async_fn, *args, name=None):
|
||||
"""Spawn a "system" task.
|
||||
|
||||
System tasks have a few differences from regular tasks:
|
||||
|
||||
* They don't need an explicit nursery; instead they go into the
|
||||
internal "system nursery".
|
||||
|
||||
* If a system task raises an exception, then it's converted into a
|
||||
:exc:`~trio.TrioInternalError` and *all* tasks are cancelled. If you
|
||||
write a system task, you should be careful to make sure it doesn't
|
||||
crash.
|
||||
|
||||
* System tasks are automatically cancelled when the main task exits.
|
||||
|
||||
* By default, system tasks have :exc:`KeyboardInterrupt` protection
|
||||
*enabled*. If you want your task to be interruptible by control-C,
|
||||
then you need to use :func:`disable_ki_protection` explicitly (and
|
||||
come up with some plan for what to do with a
|
||||
:exc:`KeyboardInterrupt`, given that system tasks aren't allowed to
|
||||
raise exceptions).
|
||||
|
||||
* System tasks do not inherit context variables from their creator.
|
||||
|
||||
Towards the end of a call to :meth:`trio.run`, after the main
|
||||
task and all system tasks have exited, the system nursery
|
||||
becomes closed. At this point, new calls to
|
||||
:func:`spawn_system_task` will raise ``RuntimeError("Nursery
|
||||
is closed to new arrivals")`` instead of creating a system
|
||||
task. It's possible to encounter this state either in
|
||||
a ``finally`` block in an async generator, or in a callback
|
||||
passed to :meth:`TrioToken.run_sync_soon` at the right moment.
|
||||
|
||||
Args:
|
||||
async_fn: An async callable.
|
||||
args: Positional arguments for ``async_fn``. If you want to pass
|
||||
keyword arguments, use :func:`functools.partial`.
|
||||
name: The name for this task. Only used for debugging/introspection
|
||||
(e.g. ``repr(task_obj)``). If this isn't a string,
|
||||
:func:`spawn_system_task` will try to make it one. A common use
|
||||
case is if you're wrapping a function before spawning a new
|
||||
task, you might pass the original function as the ``name=`` to
|
||||
make debugging easier.
|
||||
|
||||
Returns:
|
||||
Task: the newly spawned task
|
||||
|
||||
"""
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.spawn_system_task(async_fn, *args, name=name)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
def current_trio_token():
|
||||
"""Retrieve the :class:`TrioToken` for the current call to
|
||||
:func:`trio.run`.
|
||||
|
||||
"""
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return GLOBAL_RUN_CONTEXT.runner.current_trio_token()
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
async def wait_all_tasks_blocked(cushion=0.0):
|
||||
"""Block until there are no runnable tasks.
|
||||
|
||||
This is useful in testing code when you want to give other tasks a
|
||||
chance to "settle down". The calling task is blocked, and doesn't wake
|
||||
up until all other tasks are also blocked for at least ``cushion``
|
||||
seconds. (Setting a non-zero ``cushion`` is intended to handle cases
|
||||
like two tasks talking to each other over a local socket, where we
|
||||
want to ignore the potential brief moment between a send and receive
|
||||
when all tasks are blocked.)
|
||||
|
||||
Note that ``cushion`` is measured in *real* time, not the Trio clock
|
||||
time.
|
||||
|
||||
If there are multiple tasks blocked in :func:`wait_all_tasks_blocked`,
|
||||
then the one with the shortest ``cushion`` is the one woken (and
|
||||
this task becoming unblocked resets the timers for the remaining
|
||||
tasks). If there are multiple tasks that have exactly the same
|
||||
``cushion``, then all are woken.
|
||||
|
||||
You should also consider :class:`trio.testing.Sequencer`, which
|
||||
provides a more explicit way to control execution ordering within a
|
||||
test, and will often produce more readable tests.
|
||||
|
||||
Example:
|
||||
Here's an example of one way to test that Trio's locks are fair: we
|
||||
take the lock in the parent, start a child, wait for the child to be
|
||||
blocked waiting for the lock (!), and then check that we can't
|
||||
release and immediately re-acquire the lock::
|
||||
|
||||
async def lock_taker(lock):
|
||||
await lock.acquire()
|
||||
lock.release()
|
||||
|
||||
async def test_lock_fairness():
|
||||
lock = trio.Lock()
|
||||
await lock.acquire()
|
||||
async with trio.open_nursery() as nursery:
|
||||
nursery.start_soon(lock_taker, lock)
|
||||
# child hasn't run yet, we have the lock
|
||||
assert lock.locked()
|
||||
assert lock._owner is trio.lowlevel.current_task()
|
||||
await trio.testing.wait_all_tasks_blocked()
|
||||
# now the child has run and is blocked on lock.acquire(), we
|
||||
# still have the lock
|
||||
assert lock.locked()
|
||||
assert lock._owner is trio.lowlevel.current_task()
|
||||
lock.release()
|
||||
try:
|
||||
# The child has a prior claim, so we can't have it
|
||||
lock.acquire_nowait()
|
||||
except trio.WouldBlock:
|
||||
assert lock._owner is not trio.lowlevel.current_task()
|
||||
print("PASS")
|
||||
else:
|
||||
print("FAIL")
|
||||
|
||||
"""
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True
|
||||
try:
|
||||
return await GLOBAL_RUN_CONTEXT.runner.wait_all_tasks_blocked(cushion)
|
||||
except AttributeError:
|
||||
raise RuntimeError("must be called from async context")
|
||||
|
||||
|
||||
# fmt: on
|
||||
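To show what the current_statistics fields documented at the top of this file look like in practice, a small sketch:

import trio

async def main():
    async with trio.open_nursery() as nursery:
        nursery.start_soon(trio.sleep, 1)
        stats = trio.lowlevel.current_statistics()
        print("tasks living:", stats.tasks_living)
        print("tasks runnable:", stats.tasks_runnable)
        print("next deadline in:", stats.seconds_to_next_deadline)
        print("I/O backend:", stats.io_statistics.backend)
        nursery.cancel_scope.cancel()

trio.run(main)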
108
asq-env/lib/python3.9/site-packages/trio/_core/_instrumentation.py
Normal file
@@ -0,0 +1,108 @@
|
||||
import logging
|
||||
import types
|
||||
import attr
|
||||
from typing import Any, Callable, Dict, List, Sequence, Iterator, TypeVar
|
||||
|
||||
from .._abc import Instrument
|
||||
|
||||
# Used to log exceptions in instruments
|
||||
INSTRUMENT_LOGGER = logging.getLogger("trio.abc.Instrument")
|
||||
|
||||
|
||||
F = TypeVar("F", bound=Callable[..., Any])
|
||||
|
||||
# Decorator to mark methods public. This does nothing by itself, but
|
||||
# trio/_tools/gen_exports.py looks for it.
|
||||
def _public(fn: F) -> F:
|
||||
return fn
|
||||
|
||||
|
||||
class Instruments(Dict[str, Dict[Instrument, None]]):
|
||||
"""A collection of `trio.abc.Instrument` organized by hook.
|
||||
|
||||
Instrumentation calls are rather expensive, and we don't want a
|
||||
rarely-used instrument (like before_run()) to slow down hot
|
||||
operations (like before_task_step()). Thus, we cache the set of
|
||||
instruments to be called for each hook, and skip the instrumentation
|
||||
call if there's nothing currently installed for that hook.
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
def __init__(self, incoming: Sequence[Instrument]):
|
||||
self["_all"] = {}
|
||||
for instrument in incoming:
|
||||
self.add_instrument(instrument)
|
||||
|
||||
@_public
|
||||
def add_instrument(self, instrument: Instrument) -> None:
|
||||
"""Start instrumenting the current run loop with the given instrument.
|
||||
|
||||
Args:
|
||||
instrument (trio.abc.Instrument): The instrument to activate.
|
||||
|
||||
If ``instrument`` is already active, does nothing.
|
||||
|
||||
"""
|
||||
if instrument in self["_all"]:
|
||||
return
|
||||
self["_all"][instrument] = None
|
||||
try:
|
||||
for name in dir(instrument):
|
||||
if name.startswith("_"):
|
||||
continue
|
||||
try:
|
||||
prototype = getattr(Instrument, name)
|
||||
except AttributeError:
|
||||
continue
|
||||
impl = getattr(instrument, name)
|
||||
if isinstance(impl, types.MethodType) and impl.__func__ is prototype:
|
||||
# Inherited unchanged from _abc.Instrument
|
||||
continue
|
||||
self.setdefault(name, {})[instrument] = None
|
||||
except:
|
||||
self.remove_instrument(instrument)
|
||||
raise
|
||||
|
||||
@_public
|
||||
def remove_instrument(self, instrument: Instrument) -> None:
|
||||
"""Stop instrumenting the current run loop with the given instrument.
|
||||
|
||||
Args:
|
||||
instrument (trio.abc.Instrument): The instrument to de-activate.
|
||||
|
||||
Raises:
|
||||
KeyError: if the instrument is not currently active. This could
|
||||
occur either because you never added it, or because you added it
|
||||
and then it raised an unhandled exception and was automatically
|
||||
deactivated.
|
||||
|
||||
"""
|
||||
# If instrument isn't present, the KeyError propagates out
|
||||
self["_all"].pop(instrument)
|
||||
for hookname, instruments in list(self.items()):
|
||||
if instrument in instruments:
|
||||
del instruments[instrument]
|
||||
if not instruments:
|
||||
del self[hookname]
|
||||
|
||||
def call(self, hookname: str, *args: Any) -> None:
|
||||
"""Call hookname(*args) on each applicable instrument.
|
||||
|
||||
You must first check whether there are any instruments installed for
|
||||
that hook, e.g.::
|
||||
|
||||
if "before_task_step" in instruments:
|
||||
instruments.call("before_task_step", task)
|
||||
"""
|
||||
for instrument in list(self[hookname]):
|
||||
try:
|
||||
getattr(instrument, hookname)(*args)
|
||||
except:
|
||||
self.remove_instrument(instrument)
|
||||
INSTRUMENT_LOGGER.exception(
|
||||
"Exception raised when calling %r on instrument %r. "
|
||||
"Instrument has been disabled.",
|
||||
hookname,
|
||||
instrument,
|
||||
)
|
||||
22
asq-env/lib/python3.9/site-packages/trio/_core/_io_common.py
Normal file
@@ -0,0 +1,22 @@
import copy
import outcome
from .. import _core


# Utility function shared between _io_epoll and _io_windows
def wake_all(waiters, exc):
    try:
        current_task = _core.current_task()
    except RuntimeError:
        current_task = None
    raise_at_end = False
    for attr_name in ["read_task", "write_task"]:
        task = getattr(waiters, attr_name)
        if task is not None:
            if task is current_task:
                raise_at_end = True
            else:
                _core.reschedule(task, outcome.Error(copy.copy(exc)))
            setattr(waiters, attr_name, None)
    if raise_at_end:
        raise exc
317
asq-env/lib/python3.9/site-packages/trio/_core/_io_epoll.py
Normal file
@@ -0,0 +1,317 @@
|
||||
import select
|
||||
import sys
|
||||
import attr
|
||||
from collections import defaultdict
|
||||
from typing import Dict, TYPE_CHECKING
|
||||
|
||||
from .. import _core
|
||||
from ._run import _public
|
||||
from ._io_common import wake_all
|
||||
from ._wakeup_socketpair import WakeupSocketpair
|
||||
|
||||
assert not TYPE_CHECKING or sys.platform == "linux"
|
||||
|
||||
|
||||
@attr.s(slots=True, eq=False, frozen=True)
|
||||
class _EpollStatistics:
|
||||
tasks_waiting_read = attr.ib()
|
||||
tasks_waiting_write = attr.ib()
|
||||
backend = attr.ib(default="epoll")
|
||||
|
||||
|
||||
# Some facts about epoll
|
||||
# ----------------------
|
||||
#
|
||||
# Internally, an epoll object is sort of like a WeakKeyDictionary where the
|
||||
# keys are tuples of (fd number, file object). When you call epoll_ctl, you
|
||||
# pass in an fd; that gets converted to an (fd number, file object) tuple by
|
||||
# looking up the fd in the process's fd table at the time of the call. When an
|
||||
# event happens on the file object, epoll_wait drops the file object part, and
|
||||
# just returns the fd number in its event. So from the outside it looks like
|
||||
# it's keeping a table of fds, but really it's a bit more complicated. This
|
||||
# has some subtle consequences.
|
||||
#
|
||||
# In general, file objects inside the kernel are reference counted. Each entry
|
||||
# in a process's fd table holds a strong reference to the corresponding file
|
||||
# object, and most operations that use file objects take a temporary strong
|
||||
# reference while they're working. So when you call close() on an fd, that
|
||||
# might or might not cause the file object to be deallocated -- it depends on
|
||||
# whether there are any other references to that file object. Some common ways
|
||||
# this can happen:
|
||||
#
|
||||
# - after calling dup(), you have two fds in the same process referring to the
|
||||
# same file object. Even if you close one fd (= remove that entry from the
|
||||
# fd table), the file object will be kept alive by the other fd.
|
||||
# - when calling fork(), the child inherits a copy of the parent's fd table,
|
||||
# so all the file objects get another reference. (But if the fork() is
|
||||
# followed by exec(), then all of the child's fds that have the CLOEXEC flag
|
||||
# set will be closed at that point.)
|
||||
# - most syscalls that work on fds take a strong reference to the underlying
|
||||
# file object while they're using it. So there's one thread blocked in
|
||||
# read(fd), and then another thread calls close() on the last fd referring
|
||||
# to that object, the underlying file won't actually be closed until
|
||||
# after read() returns.
|
||||
#
|
||||
# However, epoll does *not* take a reference to any of the file objects in its
|
||||
# interest set (that's what makes it similar to a WeakKeyDictionary). File
|
||||
# objects inside an epoll interest set will be deallocated if all *other*
|
||||
# references to them are closed. And when that happens, the epoll object will
|
||||
# automatically deregister that file object and stop reporting events on it.
|
||||
# So that's quite handy.
|
||||
#
|
||||
# But, what happens if we do this?
|
||||
#
|
||||
# fd1 = open(...)
|
||||
# epoll_ctl(EPOLL_CTL_ADD, fd1, ...)
|
||||
# fd2 = dup(fd1)
|
||||
# close(fd1)
|
||||
#
|
||||
# In this case, the dup() keeps the underlying file object alive, so it
|
||||
# remains registered in the epoll object's interest set, as the tuple (fd1,
|
||||
# file object). But, fd1 no longer refers to this file object! You might think
|
||||
# there was some magic to handle this, but unfortunately no; the consequences
|
||||
# are totally predictable from what I said above:
|
||||
#
|
||||
# If any events occur on the file object, then epoll will report them as
|
||||
# happening on fd1, even though that doesn't make sense.
|
||||
#
|
||||
# Perhaps we would like to deregister fd1 to stop getting nonsensical events.
|
||||
# But how? When we call epoll_ctl, we have to pass an fd number, which will
|
||||
# get expanded to an (fd number, file object) tuple. We can't pass fd1,
|
||||
# because when epoll_ctl tries to look it up, it won't find our file object.
|
||||
# And we can't pass fd2, because that will get expanded to (fd2, file object),
|
||||
# which is a different lookup key. In fact, it's *impossible* to de-register
|
||||
# this fd!
|
||||
#
|
||||
# We could even have fd1 get assigned to another file object, and then we can
|
||||
# have multiple keys registered simultaneously using the same fd number, like:
|
||||
# (fd1, file object 1), (fd1, file object 2). And if events happen on either
|
||||
# file object, then epoll will happily report that something happened to
|
||||
# "fd1".
|
||||
#
|
||||
# Now here's what makes this especially nasty: suppose the old file object
|
||||
# becomes, say, readable. That means that every time we call epoll_wait, it
|
||||
# will return immediately to tell us that "fd1" is readable. Normally, we
|
||||
# would handle this by de-registering fd1, waking up the corresponding call to
|
||||
# wait_readable, then the user will call read() or recv() or something, and
|
||||
# we're fine. But if this happens on a stale fd where we can't remove the
|
||||
# registration, then we might get stuck in a state where epoll_wait *always*
|
||||
# returns immediately, so our event loop becomes unable to sleep, and now our
|
||||
# program is burning 100% of the CPU doing nothing, with no way out.
|
||||
#
|
||||
#
|
||||
# What does this mean for Trio?
|
||||
# -----------------------------
|
||||
#
|
||||
# Since we don't control the user's code, we have no way to guarantee that we
|
||||
# don't get stuck with stale fd's in our epoll interest set. For example, a
|
||||
# user could call wait_readable(fd) in one task, and then while that's
|
||||
# running, they might close(fd) from another task. In this situation, they're
|
||||
# *supposed* to call notify_closing(fd) to let us know what's happening, so we
|
||||
# can interrupt the wait_readable() call and avoid getting into this mess. And
|
||||
# that's the only thing that can possibly work correctly in all cases. But
|
||||
# sometimes user code has bugs. So if this does happen, we'd like to degrade
|
||||
# gracefully, and survive without corrupting Trio's internal state or
|
||||
# otherwise causing the whole program to explode messily.
|
||||
#
|
||||
# Our solution: we always use EPOLLONESHOT. This way, we might get *one*
|
||||
# spurious event on a stale fd, but then epoll will automatically silence it
|
||||
# until we explicitly say that we want more events... and if we have a stale
|
||||
# fd, then we actually can't re-enable it! So we can't get stuck in an
|
||||
# infinite busy-loop. If there's a stale fd hanging around, then it might
|
||||
# cause a spurious `BusyResourceError`, or cause one wait_* call to return
|
||||
# before it should have... but in general, the wait_* functions are allowed to
|
||||
# have some spurious wakeups; the user code will just attempt the operation,
|
||||
# get EWOULDBLOCK, and call wait_* again. And the program as a whole will
|
||||
# survive, any exceptions will propagate, etc.
|
||||
#
|
||||
# As a bonus, EPOLLONESHOT also saves us having to explicitly deregister fds
|
||||
# on the normal wakeup path, so it's a bit more efficient in general.
|
||||
#
|
||||
# However, EPOLLONESHOT has a few trade-offs to consider:
|
||||
#
|
||||
# First, you can't combine EPOLLONESHOT with EPOLLEXCLUSIVE. This is a bit sad
|
||||
# in one somewhat rare case: if you have a multi-process server where a group
|
||||
# of processes all share the same listening socket, then EPOLLEXCLUSIVE can be
|
||||
# used to avoid "thundering herd" problems when a new connection comes in. But
|
||||
# this isn't too bad. It's not clear if EPOLLEXCLUSIVE even works for us
|
||||
# anyway:
|
||||
#
|
||||
# https://stackoverflow.com/questions/41582560/how-does-epolls-epollexclusive-mode-interact-with-level-triggering
|
||||
#
|
||||
# And it's not clear that EPOLLEXCLUSIVE is a great approach either:
|
||||
#
|
||||
# https://blog.cloudflare.com/the-sad-state-of-linux-socket-balancing/
|
||||
#
|
||||
# And if we do need to support this, we could always add support through some
|
||||
# more-specialized API in the future. So this isn't a blocker to using
|
||||
# EPOLLONESHOT.
|
||||
#
|
||||
# Second, EPOLLONESHOT does not actually *deregister* the fd after delivering
|
||||
# an event (EPOLL_CTL_DEL). Instead, it keeps the fd registered, but
|
||||
# effectively does an EPOLL_CTL_MOD to set the fd's interest flags to
|
||||
# all-zeros. So we could still end up with an fd hanging around in the
|
||||
# interest set for a long time, even if we're not using it.
|
||||
#
|
||||
# Fortunately, this isn't a problem, because it's only a weak reference – if
|
||||
# we have a stale fd that's been silenced by EPOLLONESHOT, then it wastes a
|
||||
# tiny bit of kernel memory remembering this fd that can never be revived, but
|
||||
# when the underlying file object is eventually closed, that memory will be
|
||||
# reclaimed. So that's OK.
|
||||
#
|
||||
# The other issue is that when someone calls wait_*, using EPOLLONESHOT means
|
||||
# that if we have ever waited for this fd before, we have to use EPOLL_CTL_MOD
|
||||
# to re-enable it; but if it's a new fd, we have to use EPOLL_CTL_ADD. How do
|
||||
# we know which one to use? There's no reasonable way to track which fds are
|
||||
# currently registered -- remember, we're assuming the user might have gone
|
||||
# and rearranged their fds without telling us!
|
||||
#
|
||||
# Fortunately, this also has a simple solution: if we wait on a socket or
|
||||
# other fd once, then we'll probably wait on it lots of times. And the epoll
|
||||
# object itself knows which fds it already has registered. So when an fd comes
|
||||
# in, we optimistically assume that it's been waited on before, and try doing
|
||||
# EPOLL_CTL_MOD. And if that fails with an ENOENT error, then we try again
|
||||
# with EPOLL_CTL_ADD.
|
||||
#
|
||||
# So that's why this code is the way it is. And now you know more than you
|
||||
# wanted to about how epoll works.
|
||||
|
||||
|
||||
@attr.s(slots=True, eq=False)
|
||||
class EpollWaiters:
|
||||
read_task = attr.ib(default=None)
|
||||
write_task = attr.ib(default=None)
|
||||
current_flags = attr.ib(default=0)
|
||||
|
||||
|
||||
@attr.s(slots=True, eq=False, hash=False)
|
||||
class EpollIOManager:
|
||||
_epoll = attr.ib(factory=select.epoll)
|
||||
# {fd: EpollWaiters}
|
||||
_registered = attr.ib(
|
||||
factory=lambda: defaultdict(EpollWaiters), type=Dict[int, EpollWaiters]
|
||||
)
|
||||
_force_wakeup = attr.ib(factory=WakeupSocketpair)
|
||||
_force_wakeup_fd = attr.ib(default=None)
|
||||
|
||||
def __attrs_post_init__(self):
|
||||
self._epoll.register(self._force_wakeup.wakeup_sock, select.EPOLLIN)
|
||||
self._force_wakeup_fd = self._force_wakeup.wakeup_sock.fileno()
|
||||
|
||||
def statistics(self):
|
||||
tasks_waiting_read = 0
|
||||
tasks_waiting_write = 0
|
||||
for waiter in self._registered.values():
|
||||
if waiter.read_task is not None:
|
||||
tasks_waiting_read += 1
|
||||
if waiter.write_task is not None:
|
||||
tasks_waiting_write += 1
|
||||
return _EpollStatistics(
|
||||
tasks_waiting_read=tasks_waiting_read,
|
||||
tasks_waiting_write=tasks_waiting_write,
|
||||
)
|
||||
|
||||
def close(self):
|
||||
self._epoll.close()
|
||||
self._force_wakeup.close()
|
||||
|
||||
def force_wakeup(self):
|
||||
self._force_wakeup.wakeup_thread_and_signal_safe()
|
||||
|
||||
# Return value must be False-y IFF the timeout expired, NOT if any I/O
|
||||
# happened or force_wakeup was called. Otherwise it can be anything; gets
|
||||
# passed straight through to process_events.
|
||||
def get_events(self, timeout):
|
||||
# max_events must be > 0 or epoll gets cranky
|
||||
# accessing self._registered from a thread looks dangerous, but it's
|
||||
# OK because it doesn't matter if our value is a little bit off.
|
||||
max_events = max(1, len(self._registered))
|
||||
return self._epoll.poll(timeout, max_events)
|
||||
|
||||
def process_events(self, events):
|
||||
for fd, flags in events:
|
||||
if fd == self._force_wakeup_fd:
|
||||
self._force_wakeup.drain()
|
||||
continue
|
||||
waiters = self._registered[fd]
|
||||
# EPOLLONESHOT always clears the flags when an event is delivered
|
||||
waiters.current_flags = 0
|
||||
# Clever hack stolen from selectors.EpollSelector: an event
|
||||
# with EPOLLHUP or EPOLLERR flags wakes both readers and
|
||||
# writers.
|
||||
if flags & ~select.EPOLLIN and waiters.write_task is not None:
|
||||
_core.reschedule(waiters.write_task)
|
||||
waiters.write_task = None
|
||||
if flags & ~select.EPOLLOUT and waiters.read_task is not None:
|
||||
_core.reschedule(waiters.read_task)
|
||||
waiters.read_task = None
|
||||
self._update_registrations(fd)
|
||||
|
||||
def _update_registrations(self, fd):
|
||||
waiters = self._registered[fd]
|
||||
wanted_flags = 0
|
||||
if waiters.read_task is not None:
|
||||
wanted_flags |= select.EPOLLIN
|
||||
if waiters.write_task is not None:
|
||||
wanted_flags |= select.EPOLLOUT
|
||||
if wanted_flags != waiters.current_flags:
|
||||
try:
|
||||
try:
|
||||
# First try EPOLL_CTL_MOD
|
||||
self._epoll.modify(fd, wanted_flags | select.EPOLLONESHOT)
|
||||
except OSError:
|
||||
# If that fails, it might be a new fd; try EPOLL_CTL_ADD
|
||||
self._epoll.register(fd, wanted_flags | select.EPOLLONESHOT)
|
||||
waiters.current_flags = wanted_flags
|
||||
except OSError as exc:
|
||||
# If everything fails, probably it's a bad fd, e.g. because
|
||||
# the fd was closed behind our back. In this case we don't
|
||||
# want to try to unregister the fd, because that will probably
|
||||
# fail too. Just clear our state and wake everyone up.
|
||||
del self._registered[fd]
|
||||
# This could raise (in case we're calling this inside one of
|
||||
# the to-be-woken tasks), so we have to do it last.
|
||||
wake_all(waiters, exc)
|
||||
return
|
||||
if not wanted_flags:
|
||||
del self._registered[fd]
|
||||
|
||||
async def _epoll_wait(self, fd, attr_name):
|
||||
if not isinstance(fd, int):
|
||||
fd = fd.fileno()
|
||||
waiters = self._registered[fd]
|
||||
if getattr(waiters, attr_name) is not None:
|
||||
raise _core.BusyResourceError(
|
||||
"another task is already reading / writing this fd"
|
||||
)
|
||||
setattr(waiters, attr_name, _core.current_task())
|
||||
self._update_registrations(fd)
|
||||
|
||||
def abort(_):
|
||||
setattr(waiters, attr_name, None)
|
||||
self._update_registrations(fd)
|
||||
return _core.Abort.SUCCEEDED
|
||||
|
||||
await _core.wait_task_rescheduled(abort)
|
||||
|
||||
@_public
|
||||
async def wait_readable(self, fd):
|
||||
await self._epoll_wait(fd, "read_task")
|
||||
|
||||
@_public
|
||||
async def wait_writable(self, fd):
|
||||
await self._epoll_wait(fd, "write_task")
|
||||
|
||||
@_public
|
||||
def notify_closing(self, fd):
|
||||
if not isinstance(fd, int):
|
||||
fd = fd.fileno()
|
||||
wake_all(
|
||||
self._registered[fd],
|
||||
_core.ClosedResourceError("another task closed this fd"),
|
||||
)
|
||||
del self._registered[fd]
|
||||
try:
|
||||
self._epoll.unregister(fd)
|
||||
except (OSError, ValueError):
|
||||
pass
|
||||
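The long comment at the top of this file explains why an fd must never be closed behind Trio's back while a task is waiting on it; a minimal sketch of the corresponding user-facing contract (a socketpair is used just to have something to wait on):

import socket
import trio
import trio.testing

async def main():
    a, b = socket.socketpair()

    async def waiter():
        try:
            await trio.lowlevel.wait_readable(a)
        except trio.ClosedResourceError:
            print("waiter was interrupted cleanly")

    async with trio.open_nursery() as nursery:
        nursery.start_soon(waiter)
        await trio.testing.wait_all_tasks_blocked()
        # Tell Trio *before* closing, so the blocked waiter is woken with
        # ClosedResourceError and no stale registration is left behind.
        trio.lowlevel.notify_closing(a)
        a.close()
    b.close()

trio.run(main)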
196
asq-env/lib/python3.9/site-packages/trio/_core/_io_kqueue.py
Normal file
@@ -0,0 +1,196 @@
|
||||
import select
|
||||
import sys
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import outcome
|
||||
from contextlib import contextmanager
|
||||
import attr
|
||||
import errno
|
||||
|
||||
from .. import _core
|
||||
from ._run import _public
|
||||
from ._wakeup_socketpair import WakeupSocketpair
|
||||
|
||||
assert not TYPE_CHECKING or (sys.platform != "linux" and sys.platform != "win32")
|
||||
|
||||
|
||||
@attr.s(slots=True, eq=False, frozen=True)
|
||||
class _KqueueStatistics:
|
||||
tasks_waiting = attr.ib()
|
||||
monitors = attr.ib()
|
||||
backend = attr.ib(default="kqueue")
|
||||
|
||||
|
||||
@attr.s(slots=True, eq=False)
|
||||
class KqueueIOManager:
|
||||
_kqueue = attr.ib(factory=select.kqueue)
|
||||
# {(ident, filter): Task or UnboundedQueue}
|
||||
_registered = attr.ib(factory=dict)
|
||||
_force_wakeup = attr.ib(factory=WakeupSocketpair)
|
||||
_force_wakeup_fd = attr.ib(default=None)
|
||||
|
||||
def __attrs_post_init__(self):
|
||||
force_wakeup_event = select.kevent(
|
||||
self._force_wakeup.wakeup_sock, select.KQ_FILTER_READ, select.KQ_EV_ADD
|
||||
)
|
||||
self._kqueue.control([force_wakeup_event], 0)
|
||||
self._force_wakeup_fd = self._force_wakeup.wakeup_sock.fileno()
|
||||
|
||||
def statistics(self):
|
||||
tasks_waiting = 0
|
||||
monitors = 0
|
||||
for receiver in self._registered.values():
|
||||
if type(receiver) is _core.Task:
|
||||
tasks_waiting += 1
|
||||
else:
|
||||
monitors += 1
|
||||
return _KqueueStatistics(tasks_waiting=tasks_waiting, monitors=monitors)
|
||||
|
||||
def close(self):
|
||||
self._kqueue.close()
|
||||
self._force_wakeup.close()
|
||||
|
||||
def force_wakeup(self):
|
||||
self._force_wakeup.wakeup_thread_and_signal_safe()
|
||||
|
||||
def get_events(self, timeout):
|
||||
# max_events must be > 0 or kqueue gets cranky
|
||||
# and we generally want this to be strictly larger than the actual
|
||||
# number of events we get, so that we can tell that we've gotten
|
||||
# all the events in just 1 call.
|
||||
max_events = len(self._registered) + 1
|
||||
events = []
|
||||
while True:
|
||||
batch = self._kqueue.control([], max_events, timeout)
|
||||
events += batch
|
||||
if len(batch) < max_events:
|
||||
break
|
||||
else:
|
||||
timeout = 0
|
||||
# and loop back to the start
|
||||
return events
|
||||
|
||||
def process_events(self, events):
|
||||
for event in events:
|
||||
key = (event.ident, event.filter)
|
||||
if event.ident == self._force_wakeup_fd:
|
||||
self._force_wakeup.drain()
|
||||
continue
|
||||
receiver = self._registered[key]
|
||||
if event.flags & select.KQ_EV_ONESHOT:
|
||||
del self._registered[key]
|
||||
if type(receiver) is _core.Task:
|
||||
_core.reschedule(receiver, outcome.Value(event))
|
||||
else:
|
||||
receiver.put_nowait(event)
|
||||
|
||||
# kevent registration is complicated -- e.g. aio submission can
|
||||
# implicitly perform a EV_ADD, and EVFILT_PROC with NOTE_TRACK will
|
||||
# automatically register filters for child processes. So our lowlevel
|
||||
# API is *very* low-level: we expose the kqueue itself for adding
|
||||
# events or sticking into AIO submission structs, and split waiting
|
||||
# off into separate methods. It's your responsibility to make sure
|
||||
# that handle_io never receives an event without a corresponding
|
||||
# registration! This may be challenging if you want to be careful
|
||||
# about e.g. KeyboardInterrupt. Possibly this API could be improved to
|
||||
# be more ergonomic...
|
||||
|
||||
@_public
|
||||
def current_kqueue(self):
|
||||
return self._kqueue
|
||||
|
||||
@contextmanager
|
||||
@_public
|
||||
def monitor_kevent(self, ident, filter):
|
||||
key = (ident, filter)
|
||||
if key in self._registered:
|
||||
raise _core.BusyResourceError(
|
||||
"attempt to register multiple listeners for same ident/filter pair"
|
||||
)
|
||||
q = _core.UnboundedQueue()
|
||||
self._registered[key] = q
|
||||
try:
|
||||
yield q
|
||||
finally:
|
||||
del self._registered[key]
|
||||
|
||||
@_public
|
||||
async def wait_kevent(self, ident, filter, abort_func):
|
||||
key = (ident, filter)
|
||||
if key in self._registered:
|
||||
raise _core.BusyResourceError(
|
||||
"attempt to register multiple listeners for same ident/filter pair"
|
||||
)
|
||||
self._registered[key] = _core.current_task()
|
||||
|
||||
def abort(raise_cancel):
|
||||
r = abort_func(raise_cancel)
|
||||
if r is _core.Abort.SUCCEEDED:
|
||||
del self._registered[key]
|
||||
return r
|
||||
|
||||
return await _core.wait_task_rescheduled(abort)
|
||||
|
||||
async def _wait_common(self, fd, filter):
|
||||
if not isinstance(fd, int):
|
||||
fd = fd.fileno()
|
||||
flags = select.KQ_EV_ADD | select.KQ_EV_ONESHOT
|
||||
event = select.kevent(fd, filter, flags)
|
||||
self._kqueue.control([event], 0)
|
||||
|
||||
def abort(_):
|
||||
event = select.kevent(fd, filter, select.KQ_EV_DELETE)
|
||||
try:
|
||||
self._kqueue.control([event], 0)
|
||||
except OSError as exc:
|
||||
# kqueue tracks individual fds (*not* the underlying file
|
||||
# object, see _io_epoll.py for a long discussion of why this
|
||||
# distinction matters), and automatically deregisters an event
|
||||
# if the fd is closed. So if kqueue.control says that it
|
||||
# doesn't know about this event, then probably it's because
|
||||
# the fd was closed behind our backs. (Too bad we can't ask it
|
||||
# to wake us up when this happens, versus discovering it after
|
||||
# the fact... oh well, you can't have everything.)
|
||||
#
|
||||
# FreeBSD reports this using EBADF. macOS uses ENOENT.
|
||||
if exc.errno in (errno.EBADF, errno.ENOENT): # pragma: no branch
|
||||
pass
|
||||
else: # pragma: no cover
|
||||
# As far as we know, this branch can't happen.
|
||||
raise
|
||||
return _core.Abort.SUCCEEDED
|
||||
|
||||
await self.wait_kevent(fd, filter, abort)
|
||||
|
||||
@_public
|
||||
async def wait_readable(self, fd):
|
||||
await self._wait_common(fd, select.KQ_FILTER_READ)
|
||||
|
||||
@_public
|
||||
async def wait_writable(self, fd):
|
||||
await self._wait_common(fd, select.KQ_FILTER_WRITE)
|
||||
|
||||
@_public
|
||||
def notify_closing(self, fd):
|
||||
if not isinstance(fd, int):
|
||||
fd = fd.fileno()
|
||||
|
||||
for filter in [select.KQ_FILTER_READ, select.KQ_FILTER_WRITE]:
|
||||
key = (fd, filter)
|
||||
receiver = self._registered.get(key)
|
||||
|
||||
if receiver is None:
|
||||
continue
|
||||
|
||||
if type(receiver) is _core.Task:
|
||||
event = select.kevent(fd, filter, select.KQ_EV_DELETE)
|
||||
self._kqueue.control([event], 0)
|
||||
exc = _core.ClosedResourceError("another task closed this fd")
|
||||
_core.reschedule(receiver, outcome.Error(exc))
|
||||
del self._registered[key]
|
||||
else:
|
||||
# XX this is an interesting example of a case where being able
|
||||
# to close a queue would be useful...
|
||||
raise NotImplementedError(
|
||||
"can't close an fd that monitor_kevent is using"
|
||||
)
|
||||
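# Hedged usage sketch for the low-level kqueue API described above (macOS/BSD
# only). All names come from trio.lowlevel and the stdlib select module; the
# fd and the event-handling body are placeholders, and registration and
# deregistration are the caller's responsibility, as the comments above note.
import select
import trio

async def watch_readable(fd):
    kq = trio.lowlevel.current_kqueue()
    flt = select.KQ_FILTER_READ
    with trio.lowlevel.monitor_kevent(fd, flt) as queue:
        # Register the event ourselves; monitor_kevent only routes deliveries.
        kq.control([select.kevent(fd, flt, select.KQ_EV_ADD | select.KQ_EV_CLEAR)], 0)
        try:
            async for batch in queue:
                for event in batch:
                    ...  # handle each raw kevent
        finally:
            kq.control([select.kevent(fd, flt, select.KQ_EV_DELETE)], 0)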
868
asq-env/lib/python3.9/site-packages/trio/_core/_io_windows.py
Normal file
@@ -0,0 +1,868 @@
|
||||
import itertools
|
||||
from contextlib import contextmanager
|
||||
import enum
|
||||
import socket
|
||||
import sys
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import attr
|
||||
from outcome import Value
|
||||
|
||||
from .. import _core
|
||||
from ._run import _public
|
||||
from ._io_common import wake_all
|
||||
|
||||
from ._windows_cffi import (
|
||||
ffi,
|
||||
kernel32,
|
||||
ntdll,
|
||||
ws2_32,
|
||||
INVALID_HANDLE_VALUE,
|
||||
raise_winerror,
|
||||
_handle,
|
||||
ErrorCodes,
|
||||
FileFlags,
|
||||
AFDPollFlags,
|
||||
WSAIoctls,
|
||||
CompletionModes,
|
||||
IoControlCodes,
|
||||
)
|
||||
|
||||
assert not TYPE_CHECKING or sys.platform == "win32"
|
||||
|
||||
# There's a lot to be said about the overall design of a Windows event
|
||||
# loop. See
|
||||
#
|
||||
# https://github.com/python-trio/trio/issues/52
|
||||
#
|
||||
# for discussion. This now just has some lower-level notes:
|
||||
#
|
||||
# How IOCP fits together:
|
||||
#
|
||||
# The general model is that you call some function like ReadFile or WriteFile
|
||||
# to tell the kernel that you want it to perform some operation, and the
|
||||
# kernel goes off and does that in the background, then at some point later it
|
||||
# sends you a notification that the operation is complete. There are some more
|
||||
# exotic APIs that don't quite fit this pattern, but most APIs do.
|
||||
#
|
||||
# Each background operation is tracked using an OVERLAPPED struct, that
|
||||
# uniquely identifies that particular operation.
|
||||
#
|
||||
# An "IOCP" (or "I/O completion port") is an object that lets the kernel send
|
||||
# us these notifications -- basically it's just a kernel->userspace queue.
|
||||
#
|
||||
# Each IOCP notification is represented by an OVERLAPPED_ENTRY struct, which
|
||||
# contains 3 fields:
|
||||
# - The "completion key". This is an opaque integer that we pick, and use
|
||||
# however is convenient.
|
||||
# - pointer to the OVERLAPPED struct for the completed operation.
|
||||
# - dwNumberOfBytesTransferred (an integer).
|
||||
#
|
||||
# And in addition, for regular I/O, the OVERLAPPED structure gets filled in
|
||||
# with:
|
||||
# - result code (named "Internal")
|
||||
# - number of bytes transferred (named "InternalHigh"); usually redundant
|
||||
# with dwNumberOfBytesTransferred.
|
||||
#
|
||||
# There are also some other entries in OVERLAPPED which only matter on input:
|
||||
# - Offset and OffsetHigh which are inputs to {Read,Write}File and
|
||||
# otherwise always zero
|
||||
# - hEvent which is for if you aren't using IOCP; we always set it to zero.
|
||||
#
|
||||
# That describes the usual pattern for operations and the usual meaning of
|
||||
# these struct fields, but really these are just some arbitrary chunks of
|
||||
# bytes that get passed back and forth, so some operations like to overload
|
||||
# them to mean something else.
|
||||
#
|
||||
# You can also directly queue an OVERLAPPED_ENTRY object to an IOCP by calling
|
||||
# PostQueuedCompletionStatus. When you use this you get to set all the
|
||||
# OVERLAPPED_ENTRY fields to arbitrary values.
|
||||
#
|
||||
# You can request to cancel any operation if you know which handle it was
|
||||
# issued on + the OVERLAPPED struct that identifies it (via CancelIoEx). This
|
||||
# request might fail because the operation has already completed, or it might
|
||||
# be queued to happen in the background, so you only find out whether it
|
||||
# succeeded or failed later, when we get back the notification for the
|
||||
# operation being complete.
|
||||
#
|
||||
# There are three types of operations that we support:
|
||||
#
|
||||
# == Regular I/O operations on handles (e.g. files or named pipes) ==
|
||||
#
|
||||
# Implemented by: register_with_iocp, wait_overlapped
|
||||
#
|
||||
# To use these, you have to register the handle with your IOCP first. Once
|
||||
# it's registered, any operations on that handle will automatically send
|
||||
# completion events to that IOCP, with a completion key that you specify *when
|
||||
# the handle is registered* (so you can't use different completion keys for
|
||||
# different operations).
|
||||
#
|
||||
# We give these two dedicated completion keys: CKeys.WAIT_OVERLAPPED for
|
||||
# regular operations, and CKeys.LATE_CANCEL that's used to make
|
||||
# wait_overlapped cancellable even if the user forgot to call
|
||||
# register_with_iocp. The problem here is that after we request the cancel,
|
||||
# wait_overlapped keeps blocking until it sees the completion notification...
|
||||
# but if the user forgot to register_with_iocp, then the completion will never
|
||||
# come, so the cancellation will never resolve. To avoid this, whenever we try
|
||||
# to cancel an I/O operation and the cancellation fails, we use
|
||||
# PostQueuedCompletionStatus to send a CKeys.LATE_CANCEL notification. If this
|
||||
# arrives before the real completion, we assume the user forgot to call
|
||||
# register_with_iocp on their handle, and raise an error accordingly.
|
||||
#
|
||||
# == Socket state notifications ==
|
||||
#
|
||||
# Implemented by: wait_readable, wait_writable
|
||||
#
|
||||
# The public APIs that windows provides for this are all really awkward and
|
||||
# don't integrate with IOCP. So we drop down to a lower level, and talk
|
||||
# directly to the socket device driver in the kernel, which is called "AFD".
|
||||
# Unfortunately, this is a totally undocumented internal API. Fortunately
|
||||
# libuv also does this, so we can be pretty confident that MS won't break it
|
||||
# on us, and there is a *little* bit of information out there if you go
|
||||
# digging.
|
||||
#
|
||||
# Basically: we open a magic file that refers to the AFD driver, register the
|
||||
# magic file with our IOCP, and then we can issue regular overlapped I/O
|
||||
# operations on that handle. Specifically, the operation we use is called
|
||||
# IOCTL_AFD_POLL, which lets us pass in a buffer describing which events we're
|
||||
# interested in on a given socket (readable, writable, etc.). Later, when the
|
||||
# operation completes, the kernel rewrites the buffer we passed in to record
|
||||
# which events happened, and uses IOCP as normal to notify us that this
|
||||
# operation has completed.
|
||||
#
|
||||
# Unfortunately, the Windows kernel seems to have bugs if you try to issue
|
||||
# multiple simultaneous IOCTL_AFD_POLL operations on the same socket (see
|
||||
# notes-to-self/afd-lab.py). So if a user calls wait_readable and
|
||||
# wait_writable at the same time, we have to combine those into a single
|
||||
# IOCTL_AFD_POLL. This means we can't just use the wait_overlapped machinery.
|
||||
# Instead we have some dedicated code to handle these operations, and a
|
||||
# dedicated completion key CKeys.AFD_POLL.
|
||||
#
|
||||
# Sources of information:
|
||||
# - https://github.com/python-trio/trio/issues/52
|
||||
# - Wepoll: https://github.com/piscisaureus/wepoll/
|
||||
# - libuv: https://github.com/libuv/libuv/
|
||||
# - ReactOS: https://github.com/reactos/reactos/
|
||||
# - Ancient leaked copies of the Windows NT and Winsock source code:
|
||||
# https://github.com/pustladi/Windows-2000/blob/661d000d50637ed6fab2329d30e31775046588a9/private/net/sockets/winsock2/wsp/msafd/select.c#L59-L655
|
||||
# https://github.com/metoo10987/WinNT4/blob/f5c14e6b42c8f45c20fe88d14c61f9d6e0386b8e/private/ntos/afd/poll.c#L68-L707
|
||||
# - The WSAEventSelect docs (this exposes a finer-grained set of events than
|
||||
# select(), so if you squint you can treat it as a source of information on
|
||||
# the fine-grained AFD poll types)
|
||||
#
|
||||
#
|
||||
# == Everything else ==
|
||||
#
|
||||
# There are also some weirder APIs for interacting with IOCP. For example, the
|
||||
# "Job" API lets you specify an IOCP handle and "completion key", and then in
|
||||
# the future whenever certain events happen it uses IOCP to send a
|
||||
# notification. These notifications don't correspond to any particular
|
||||
# operation; they're just spontaneous messages you get. The
|
||||
# "dwNumberOfBytesTransferred" field gets repurposed to carry an identifier
|
||||
# for the message type (e.g. JOB_OBJECT_MSG_EXIT_PROCESS), and the
|
||||
# "lpOverlapped" field gets repurposed to carry some arbitrary data that
|
||||
# depends on the message type (e.g. the pid of the process that exited).
|
||||
#
|
||||
# To handle these, we have monitor_completion_key, where we hand out an
|
||||
# unassigned completion key, let users set it up however they want, and then
|
||||
# get any events that arrive on that key.
|
||||
#
|
||||
# (Note: monitor_completion_key is not documented or fully baked; expect it to
|
||||
# change in the future.)
|
||||
|
||||
|
||||
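# Hedged sketch of the "everything else" path just described: borrow an
# unassigned completion key, point some IOCP-capable API at it, and read the
# raw completion packets back. The setup callback is hypothetical; the trio
# names (monitor_completion_key, current_iocp) are the lowlevel APIs defined
# later in this module.
import trio

async def watch_notifications(setup_fn):
    with trio.lowlevel.monitor_completion_key() as (key, queue):
        # e.g. associate a Job object with (current_iocp(), key) here
        setup_fn(trio.lowlevel.current_iocp(), key)
        async for batch in queue:
            for info in batch:
                print(info.dwNumberOfBytesTransferred, info.lpOverlapped)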
# Our completion keys
|
||||
class CKeys(enum.IntEnum):
|
||||
AFD_POLL = 0
|
||||
WAIT_OVERLAPPED = 1
|
||||
LATE_CANCEL = 2
|
||||
FORCE_WAKEUP = 3
|
||||
USER_DEFINED = 4 # and above
|
||||
|
||||
|
||||
def _check(success):
|
||||
if not success:
|
||||
raise_winerror()
|
||||
return success
|
||||
|
||||
|
||||
def _get_underlying_socket(sock, *, which=WSAIoctls.SIO_BASE_HANDLE):
|
||||
if hasattr(sock, "fileno"):
|
||||
sock = sock.fileno()
|
||||
base_ptr = ffi.new("HANDLE *")
|
||||
out_size = ffi.new("DWORD *")
|
||||
failed = ws2_32.WSAIoctl(
|
||||
ffi.cast("SOCKET", sock),
|
||||
which,
|
||||
ffi.NULL,
|
||||
0,
|
||||
base_ptr,
|
||||
ffi.sizeof("HANDLE"),
|
||||
out_size,
|
||||
ffi.NULL,
|
||||
ffi.NULL,
|
||||
)
|
||||
if failed:
|
||||
code = ws2_32.WSAGetLastError()
|
||||
raise_winerror(code)
|
||||
return base_ptr[0]
|
||||
|
||||
|
||||
def _get_base_socket(sock):
|
||||
# There is a development kit for LSPs called Komodia Redirector.
|
||||
# It does some unusual (some might say evil) things like intercepting
|
||||
# SIO_BASE_HANDLE (fails) and SIO_BSP_HANDLE_SELECT (returns the same
|
||||
# socket) in a misguided attempt to prevent bypassing it. It's been used
|
||||
# in malware including the infamous Lenovo Superfish incident from 2015,
|
||||
# but unfortunately is also used in some legitimate products such as
|
||||
# parental control tools and Astrill VPN. Komodia happens to not
|
||||
# block SIO_BSP_HANDLE_POLL, so we'll try SIO_BASE_HANDLE and fall back
|
||||
# to SIO_BSP_HANDLE_POLL if it doesn't work.
|
||||
# References:
|
||||
# - https://github.com/piscisaureus/wepoll/blob/0598a791bf9cbbf480793d778930fc635b044980/wepoll.c#L2223
|
||||
# - https://github.com/tokio-rs/mio/issues/1314
|
||||
|
||||
while True:
|
||||
try:
|
||||
# If this is not a Komodia-intercepted socket, we can just use
|
||||
# SIO_BASE_HANDLE.
|
||||
return _get_underlying_socket(sock)
|
||||
except OSError as ex:
|
||||
if ex.winerror == ErrorCodes.ERROR_NOT_SOCKET:
|
||||
# SIO_BASE_HANDLE might fail even without LSP intervention,
|
||||
# if we get something that's not a socket.
|
||||
raise
|
||||
if hasattr(sock, "fileno"):
|
||||
sock = sock.fileno()
|
||||
sock = _handle(sock)
|
||||
next_sock = _get_underlying_socket(
|
||||
sock, which=WSAIoctls.SIO_BSP_HANDLE_POLL
|
||||
)
|
||||
if next_sock == sock:
|
||||
# If BSP_HANDLE_POLL returns the same socket we already had,
|
||||
# then there's no layering going on and we need to fail
|
||||
# to prevent an infinite loop.
|
||||
raise RuntimeError(
|
||||
"Unexpected network configuration detected: "
|
||||
"SIO_BASE_HANDLE failed and SIO_BSP_HANDLE_POLL didn't "
|
||||
"return a different socket. Please file a bug at "
|
||||
"https://github.com/python-trio/trio/issues/new, "
|
||||
"and include the output of running: "
|
||||
"netsh winsock show catalog"
|
||||
)
|
||||
# Otherwise we've gotten at least one layer deeper, so
|
||||
# loop back around to keep digging.
|
||||
sock = next_sock
|
||||
|
||||
|
||||
def _afd_helper_handle():
|
||||
# The "AFD" driver is exposed at the NT path "\Device\Afd". We're using
|
||||
# the Win32 CreateFile, though, so we have to pass a Win32 path. \\.\ is
|
||||
# how Win32 refers to the NT \GLOBAL??\ directory, and GLOBALROOT is a
|
||||
# symlink inside that directory that points to the root of the NT path
|
||||
# system. So by sticking that in front of the NT path, we get a Win32
|
||||
# path. Alternatively, we could use NtCreateFile directly, since it takes
|
||||
# an NT path. But we already wrap CreateFileW so this was easier.
|
||||
# References:
|
||||
# https://blogs.msdn.microsoft.com/jeremykuhne/2016/05/02/dos-to-nt-a-paths-journey/
|
||||
# https://stackoverflow.com/a/21704022
|
||||
#
|
||||
# I'm actually not sure what the \Trio part at the end of the path does.
|
||||
# Wepoll uses \Device\Afd\Wepoll, so I just copied them. (I'm guessing it
|
||||
# might be visible in some debug tools, and is otherwise arbitrary?)
|
||||
rawname = r"\\.\GLOBALROOT\Device\Afd\Trio".encode("utf-16le") + b"\0\0"
|
||||
rawname_buf = ffi.from_buffer(rawname)
|
||||
|
||||
handle = kernel32.CreateFileW(
|
||||
ffi.cast("LPCWSTR", rawname_buf),
|
||||
FileFlags.SYNCHRONIZE,
|
||||
FileFlags.FILE_SHARE_READ | FileFlags.FILE_SHARE_WRITE,
|
||||
ffi.NULL, # no security attributes
|
||||
FileFlags.OPEN_EXISTING,
|
||||
FileFlags.FILE_FLAG_OVERLAPPED,
|
||||
ffi.NULL, # no template file
|
||||
)
|
||||
if handle == INVALID_HANDLE_VALUE: # pragma: no cover
|
||||
raise_winerror()
|
||||
return handle
|
||||
|
||||
|
||||
# AFD_POLL has a finer-grained set of events than other APIs. We collapse them
|
||||
# down into Unix-style "readable" and "writable".
|
||||
#
|
||||
# Note: AFD_POLL_LOCAL_CLOSE isn't a reliable substitute for notify_closing(),
|
||||
# because even if the user closes the socket *handle*, the socket *object*
|
||||
# could still remain open, e.g. if the socket was dup'ed (possibly into
|
||||
# another process). Explicitly calling notify_closing() guarantees that
|
||||
# everyone waiting on the *handle* wakes up, which is what you'd expect.
|
||||
#
|
||||
# However, we can't avoid getting LOCAL_CLOSE notifications -- the kernel
|
||||
# delivers them whether we ask for them or not -- so better to include them
|
||||
# here for documentation, and so that when we check (delivered & requested) we
|
||||
# get a match.
|
||||
|
||||
READABLE_FLAGS = (
|
||||
AFDPollFlags.AFD_POLL_RECEIVE
|
||||
| AFDPollFlags.AFD_POLL_ACCEPT
|
||||
| AFDPollFlags.AFD_POLL_DISCONNECT # other side sent an EOF
|
||||
| AFDPollFlags.AFD_POLL_ABORT
|
||||
| AFDPollFlags.AFD_POLL_LOCAL_CLOSE
|
||||
)
|
||||
|
||||
WRITABLE_FLAGS = (
|
||||
AFDPollFlags.AFD_POLL_SEND
|
||||
| AFDPollFlags.AFD_POLL_CONNECT_FAIL
|
||||
| AFDPollFlags.AFD_POLL_ABORT
|
||||
| AFDPollFlags.AFD_POLL_LOCAL_CLOSE
|
||||
)
|
||||
|
||||
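# Worked example (illustrative only): how a set of delivered AFD flags maps
# onto Unix-style readability/writability using the masks above -- this is
# the same check that process_events performs further down.
example_delivered = AFDPollFlags.AFD_POLL_RECEIVE | AFDPollFlags.AFD_POLL_LOCAL_CLOSE
assert example_delivered & READABLE_FLAGS   # readable: RECEIVE and LOCAL_CLOSE both match
assert example_delivered & WRITABLE_FLAGS   # also "writable", since LOCAL_CLOSE is in both sets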
|
||||
# Annoyingly, while the API makes it *seem* like you can happily issue as many
|
||||
# independent AFD_POLL operations as you want without them interfering with
|
||||
# each other, in fact if you issue two AFD_POLL operations for the same socket
|
||||
# at the same time with notification going to the same IOCP port, then Windows
|
||||
# gets super confused. For example, if we issue one operation from
|
||||
# wait_readable, and another independent operation from wait_writable, then
|
||||
# Windows may complete the wait_writable operation when the socket becomes
|
||||
# readable.
|
||||
#
|
||||
# To avoid this, we have to coalesce all the operations on a single socket
|
||||
# into one, and when the set of waiters changes we have to throw away the old
|
||||
# operation and start a new one.
|
||||
@attr.s(slots=True, eq=False)
|
||||
class AFDWaiters:
|
||||
read_task = attr.ib(default=None)
|
||||
write_task = attr.ib(default=None)
|
||||
current_op = attr.ib(default=None)
|
||||
|
||||
|
||||
# We also need to bundle up all the info for a single op into a standalone
|
||||
# object, because we need to keep all these objects alive until the operation
|
||||
# finishes, even if we're throwing it away.
|
||||
@attr.s(slots=True, eq=False, frozen=True)
|
||||
class AFDPollOp:
|
||||
lpOverlapped = attr.ib()
|
||||
poll_info = attr.ib()
|
||||
waiters = attr.ib()
|
||||
afd_group = attr.ib()
|
||||
|
||||
|
||||
# The Windows kernel has a weird issue when using AFD handles. If you have N
|
||||
# instances of wait_readable/wait_writable registered with a single AFD handle,
|
||||
# then cancelling any one of them takes something like O(N**2) time. So if we
|
||||
# used just a single AFD handle, then cancellation would quickly become very
|
||||
# expensive, e.g. a program with N active sockets would take something like
|
||||
# O(N**3) time to unwind after control-C. The solution is to spread our sockets
|
||||
# out over multiple AFD handles, so that N doesn't grow too large for any
|
||||
# individual handle.
|
||||
MAX_AFD_GROUP_SIZE = 500 # at 1000, the cubic scaling is just starting to bite
|
||||
|
||||
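# Rough worked example of the sizing above (illustrative numbers only): with
# 10_000 sockets behind a single AFD handle, each cancellation would cost on
# the order of 10_000**2 steps, so unwinding them all is roughly 10_000**3.
# Capping each group at MAX_AFD_GROUP_SIZE instead spreads those sockets over:
import math
afd_handles_needed = math.ceil(10_000 / MAX_AFD_GROUP_SIZE)   # == 20 helper handles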
|
||||
@attr.s(slots=True, eq=False)
|
||||
class AFDGroup:
|
||||
size = attr.ib()
|
||||
handle = attr.ib()
|
||||
|
||||
|
||||
@attr.s(slots=True, eq=False, frozen=True)
|
||||
class _WindowsStatistics:
|
||||
tasks_waiting_read = attr.ib()
|
||||
tasks_waiting_write = attr.ib()
|
||||
tasks_waiting_overlapped = attr.ib()
|
||||
completion_key_monitors = attr.ib()
|
||||
backend = attr.ib(default="windows")
|
||||
|
||||
|
||||
# Maximum number of events to dequeue from the completion port on each pass
|
||||
# through the run loop. Somewhat arbitrary. Should be large enough to collect
|
||||
# a good set of tasks on each loop, but not so large that it wastes tons of memory.
|
||||
# (Each WindowsIOManager holds a buffer whose size is ~32x this number.)
|
||||
MAX_EVENTS = 1000
|
||||
|
||||
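# Back-of-the-envelope version of the "~32x" note above, assuming a 64-bit
# build where one OVERLAPPED_ENTRY occupies 32 bytes (an assumption, not a
# guarantee):
approx_event_buffer_bytes = MAX_EVENTS * 32   # ~32 KB per WindowsIOManager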
|
||||
@attr.s(frozen=True)
|
||||
class CompletionKeyEventInfo:
|
||||
lpOverlapped = attr.ib()
|
||||
dwNumberOfBytesTransferred = attr.ib()
|
||||
|
||||
|
||||
class WindowsIOManager:
|
||||
def __init__(self):
|
||||
# If this method raises an exception, then __del__ could run on a
|
||||
# half-initialized object. So we initialize everything that __del__
|
||||
# touches to safe values up front, before we do anything that can
|
||||
# fail.
|
||||
self._iocp = None
|
||||
self._all_afd_handles = []
|
||||
|
||||
self._iocp = _check(
|
||||
kernel32.CreateIoCompletionPort(INVALID_HANDLE_VALUE, ffi.NULL, 0, 0)
|
||||
)
|
||||
self._events = ffi.new("OVERLAPPED_ENTRY[]", MAX_EVENTS)
|
||||
|
||||
self._vacant_afd_groups = set()
|
||||
# {lpOverlapped: AFDPollOp}
|
||||
self._afd_ops = {}
|
||||
# {socket handle: AFDWaiters}
|
||||
self._afd_waiters = {}
|
||||
|
||||
# {lpOverlapped: task}
|
||||
self._overlapped_waiters = {}
|
||||
self._posted_too_late_to_cancel = set()
|
||||
|
||||
self._completion_key_queues = {}
|
||||
self._completion_key_counter = itertools.count(CKeys.USER_DEFINED)
|
||||
|
||||
with socket.socket() as s:
|
||||
# We assume we're not working with any LSP that changes
|
||||
# how select() is supposed to work. Validate this by
|
||||
# ensuring that the result of SIO_BSP_HANDLE_SELECT (the
|
||||
# LSP-hookable mechanism for "what should I use for
|
||||
# select()?") matches that of SIO_BASE_HANDLE ("what is
|
||||
# the real non-hooked underlying socket here?").
|
||||
#
|
||||
# This doesn't work for Komodia-based LSPs; see the comments
|
||||
# in _get_base_socket() for details. But we have special
|
||||
# logic for those, so we just skip this check if
|
||||
# SIO_BASE_HANDLE fails.
|
||||
|
||||
# LSPs can in theory override this, but we believe that it never
|
||||
# actually happens in the wild (except Komodia)
|
||||
select_handle = _get_underlying_socket(
|
||||
s, which=WSAIoctls.SIO_BSP_HANDLE_SELECT
|
||||
)
|
||||
try:
|
||||
# LSPs shouldn't override this...
|
||||
base_handle = _get_underlying_socket(s, which=WSAIoctls.SIO_BASE_HANDLE)
|
||||
except OSError:
|
||||
# But Komodia-based LSPs do anyway, in a way that causes
|
||||
# a failure with WSAEFAULT. We have special handling for
|
||||
# them in _get_base_socket(). Make sure it works.
|
||||
_get_base_socket(s)
|
||||
else:
|
||||
if base_handle != select_handle:
|
||||
raise RuntimeError(
|
||||
"Unexpected network configuration detected: "
|
||||
"SIO_BASE_HANDLE and SIO_BSP_HANDLE_SELECT differ. "
|
||||
"Please file a bug at "
|
||||
"https://github.com/python-trio/trio/issues/new, "
|
||||
"and include the output of running: "
|
||||
"netsh winsock show catalog"
|
||||
)
|
||||
|
||||
def close(self):
|
||||
try:
|
||||
if self._iocp is not None:
|
||||
iocp = self._iocp
|
||||
self._iocp = None
|
||||
_check(kernel32.CloseHandle(iocp))
|
||||
finally:
|
||||
while self._all_afd_handles:
|
||||
afd_handle = self._all_afd_handles.pop()
|
||||
_check(kernel32.CloseHandle(afd_handle))
|
||||
|
||||
def __del__(self):
|
||||
self.close()
|
||||
|
||||
def statistics(self):
|
||||
tasks_waiting_read = 0
|
||||
tasks_waiting_write = 0
|
||||
for waiter in self._afd_waiters.values():
|
||||
if waiter.read_task is not None:
|
||||
tasks_waiting_read += 1
|
||||
if waiter.write_task is not None:
|
||||
tasks_waiting_write += 1
|
||||
return _WindowsStatistics(
|
||||
tasks_waiting_read=tasks_waiting_read,
|
||||
tasks_waiting_write=tasks_waiting_write,
|
||||
tasks_waiting_overlapped=len(self._overlapped_waiters),
|
||||
completion_key_monitors=len(self._completion_key_queues),
|
||||
)
|
||||
|
||||
def force_wakeup(self):
|
||||
_check(
|
||||
kernel32.PostQueuedCompletionStatus(
|
||||
self._iocp, 0, CKeys.FORCE_WAKEUP, ffi.NULL
|
||||
)
|
||||
)
|
||||
|
||||
def get_events(self, timeout):
|
||||
received = ffi.new("PULONG")
|
||||
milliseconds = round(1000 * timeout)
|
||||
if timeout > 0 and milliseconds == 0:
|
||||
milliseconds = 1
|
||||
try:
|
||||
_check(
|
||||
kernel32.GetQueuedCompletionStatusEx(
|
||||
self._iocp, self._events, MAX_EVENTS, received, milliseconds, 0
|
||||
)
|
||||
)
|
||||
except OSError as exc:
|
||||
if exc.winerror != ErrorCodes.WAIT_TIMEOUT: # pragma: no cover
|
||||
raise
|
||||
return 0
|
||||
return received[0]
|
||||
|
||||
def process_events(self, received):
|
||||
for i in range(received):
|
||||
entry = self._events[i]
|
||||
if entry.lpCompletionKey == CKeys.AFD_POLL:
|
||||
lpo = entry.lpOverlapped
|
||||
op = self._afd_ops.pop(lpo)
|
||||
waiters = op.waiters
|
||||
if waiters.current_op is not op:
|
||||
# Stale op, nothing to do
|
||||
pass
|
||||
else:
|
||||
waiters.current_op = None
|
||||
# I don't think this can happen, so if it does let's crash
|
||||
# and get a debug trace.
|
||||
if lpo.Internal != 0: # pragma: no cover
|
||||
code = ntdll.RtlNtStatusToDosError(lpo.Internal)
|
||||
raise_winerror(code)
|
||||
flags = op.poll_info.Handles[0].Events
|
||||
if waiters.read_task and flags & READABLE_FLAGS:
|
||||
_core.reschedule(waiters.read_task)
|
||||
waiters.read_task = None
|
||||
if waiters.write_task and flags & WRITABLE_FLAGS:
|
||||
_core.reschedule(waiters.write_task)
|
||||
waiters.write_task = None
|
||||
self._refresh_afd(op.poll_info.Handles[0].Handle)
|
||||
elif entry.lpCompletionKey == CKeys.WAIT_OVERLAPPED:
|
||||
# Regular I/O event, dispatch on lpOverlapped
|
||||
waiter = self._overlapped_waiters.pop(entry.lpOverlapped)
|
||||
overlapped = entry.lpOverlapped
|
||||
transferred = entry.dwNumberOfBytesTransferred
|
||||
info = CompletionKeyEventInfo(
|
||||
lpOverlapped=overlapped, dwNumberOfBytesTransferred=transferred
|
||||
)
|
||||
_core.reschedule(waiter, Value(info))
|
||||
elif entry.lpCompletionKey == CKeys.LATE_CANCEL:
|
||||
# Post made by a regular I/O event's abort_fn
|
||||
# after it failed to cancel the I/O. If we still
|
||||
# have a waiter with this lpOverlapped, we didn't
|
||||
# get the regular I/O completion and almost
|
||||
# certainly the user forgot to call
|
||||
# register_with_iocp.
|
||||
self._posted_too_late_to_cancel.remove(entry.lpOverlapped)
|
||||
try:
|
||||
waiter = self._overlapped_waiters.pop(entry.lpOverlapped)
|
||||
except KeyError:
|
||||
# Looks like the actual completion got here before this
|
||||
# fallback post did -- we're in the "expected" case of
|
||||
# too-late-to-cancel, where the user did nothing wrong.
|
||||
# Nothing more to do.
|
||||
pass
|
||||
else:
|
||||
exc = _core.TrioInternalError(
|
||||
"Failed to cancel overlapped I/O in {} and didn't "
|
||||
"receive the completion either. Did you forget to "
|
||||
"call register_with_iocp()?".format(waiter.name)
|
||||
)
|
||||
# Raising this out of handle_io ensures that
|
||||
# the user will see our message even if some
|
||||
# other task is in an uncancellable wait due
|
||||
# to the same underlying forgot-to-register
|
||||
# issue (if their CancelIoEx succeeds, we
|
||||
# have no way of noticing that their completion
|
||||
# won't arrive). Unfortunately it loses the
|
||||
# task traceback. If you're debugging this
|
||||
# error and can't tell where it's coming from,
|
||||
# try changing this line to
|
||||
# _core.reschedule(waiter, outcome.Error(exc))
|
||||
raise exc
|
||||
elif entry.lpCompletionKey == CKeys.FORCE_WAKEUP:
|
||||
pass
|
||||
else:
|
||||
# dispatch on lpCompletionKey
|
||||
queue = self._completion_key_queues[entry.lpCompletionKey]
|
||||
overlapped = int(ffi.cast("uintptr_t", entry.lpOverlapped))
|
||||
transferred = entry.dwNumberOfBytesTransferred
|
||||
info = CompletionKeyEventInfo(
|
||||
lpOverlapped=overlapped, dwNumberOfBytesTransferred=transferred
|
||||
)
|
||||
queue.put_nowait(info)
|
||||
|
||||
def _register_with_iocp(self, handle, completion_key):
|
||||
handle = _handle(handle)
|
||||
_check(kernel32.CreateIoCompletionPort(handle, self._iocp, completion_key, 0))
|
||||
# Supposedly this makes things slightly faster, by disabling the
|
||||
# ability to do WaitForSingleObject(handle). We would never want to do
|
||||
# that anyway, so might as well get the extra speed (if any).
|
||||
# Ref: http://www.lenholgate.com/blog/2009/09/interesting-blog-posts-on-high-performance-servers.html
|
||||
_check(
|
||||
kernel32.SetFileCompletionNotificationModes(
|
||||
handle, CompletionModes.FILE_SKIP_SET_EVENT_ON_HANDLE
|
||||
)
|
||||
)
|
||||
|
||||
################################################################
|
||||
# AFD stuff
|
||||
################################################################
|
||||
|
||||
def _refresh_afd(self, base_handle):
|
||||
waiters = self._afd_waiters[base_handle]
|
||||
if waiters.current_op is not None:
|
||||
afd_group = waiters.current_op.afd_group
|
||||
try:
|
||||
_check(
|
||||
kernel32.CancelIoEx(
|
||||
afd_group.handle, waiters.current_op.lpOverlapped
|
||||
)
|
||||
)
|
||||
except OSError as exc:
|
||||
if exc.winerror != ErrorCodes.ERROR_NOT_FOUND:
|
||||
# I don't think this is possible, so if it happens let's
|
||||
# crash noisily.
|
||||
raise # pragma: no cover
|
||||
waiters.current_op = None
|
||||
afd_group.size -= 1
|
||||
self._vacant_afd_groups.add(afd_group)
|
||||
|
||||
flags = 0
|
||||
if waiters.read_task is not None:
|
||||
flags |= READABLE_FLAGS
|
||||
if waiters.write_task is not None:
|
||||
flags |= WRITABLE_FLAGS
|
||||
|
||||
if not flags:
|
||||
del self._afd_waiters[base_handle]
|
||||
else:
|
||||
try:
|
||||
afd_group = self._vacant_afd_groups.pop()
|
||||
except KeyError:
|
||||
afd_group = AFDGroup(0, _afd_helper_handle())
|
||||
self._register_with_iocp(afd_group.handle, CKeys.AFD_POLL)
|
||||
self._all_afd_handles.append(afd_group.handle)
|
||||
self._vacant_afd_groups.add(afd_group)
|
||||
|
||||
lpOverlapped = ffi.new("LPOVERLAPPED")
|
||||
|
||||
poll_info = ffi.new("AFD_POLL_INFO *")
|
||||
poll_info.Timeout = 2 ** 63 - 1 # INT64_MAX
|
||||
poll_info.NumberOfHandles = 1
|
||||
poll_info.Exclusive = 0
|
||||
poll_info.Handles[0].Handle = base_handle
|
||||
poll_info.Handles[0].Status = 0
|
||||
poll_info.Handles[0].Events = flags
|
||||
|
||||
try:
|
||||
_check(
|
||||
kernel32.DeviceIoControl(
|
||||
afd_group.handle,
|
||||
IoControlCodes.IOCTL_AFD_POLL,
|
||||
poll_info,
|
||||
ffi.sizeof("AFD_POLL_INFO"),
|
||||
poll_info,
|
||||
ffi.sizeof("AFD_POLL_INFO"),
|
||||
ffi.NULL,
|
||||
lpOverlapped,
|
||||
)
|
||||
)
|
||||
except OSError as exc:
|
||||
if exc.winerror != ErrorCodes.ERROR_IO_PENDING:
|
||||
# This could happen if the socket handle got closed behind
|
||||
# our back while a wait_* call was pending, and we tried
|
||||
# to re-issue the call. Clear our state and wake up any
|
||||
# pending calls.
|
||||
del self._afd_waiters[base_handle]
|
||||
# Do this last, because it could raise.
|
||||
wake_all(waiters, exc)
|
||||
return
|
||||
op = AFDPollOp(lpOverlapped, poll_info, waiters, afd_group)
|
||||
waiters.current_op = op
|
||||
self._afd_ops[lpOverlapped] = op
|
||||
afd_group.size += 1
|
||||
if afd_group.size >= MAX_AFD_GROUP_SIZE:
|
||||
self._vacant_afd_groups.remove(afd_group)
|
||||
|
||||
async def _afd_poll(self, sock, mode):
|
||||
base_handle = _get_base_socket(sock)
|
||||
waiters = self._afd_waiters.get(base_handle)
|
||||
if waiters is None:
|
||||
waiters = AFDWaiters()
|
||||
self._afd_waiters[base_handle] = waiters
|
||||
if getattr(waiters, mode) is not None:
|
||||
raise _core.BusyResourceError
|
||||
setattr(waiters, mode, _core.current_task())
|
||||
# Could potentially raise if the handle is somehow invalid; that's OK,
|
||||
# we let it escape.
|
||||
self._refresh_afd(base_handle)
|
||||
|
||||
def abort_fn(_):
|
||||
setattr(waiters, mode, None)
|
||||
self._refresh_afd(base_handle)
|
||||
return _core.Abort.SUCCEEDED
|
||||
|
||||
await _core.wait_task_rescheduled(abort_fn)
|
||||
|
||||
@_public
|
||||
async def wait_readable(self, sock):
|
||||
await self._afd_poll(sock, "read_task")
|
||||
|
||||
@_public
|
||||
async def wait_writable(self, sock):
|
||||
await self._afd_poll(sock, "write_task")
|
||||
|
||||
@_public
|
||||
def notify_closing(self, handle):
|
||||
handle = _get_base_socket(handle)
|
||||
waiters = self._afd_waiters.get(handle)
|
||||
if waiters is not None:
|
||||
wake_all(waiters, _core.ClosedResourceError())
|
||||
self._refresh_afd(handle)
|
||||
|
||||
################################################################
|
||||
# Regular overlapped operations
|
||||
################################################################
|
||||
|
||||
@_public
|
||||
def register_with_iocp(self, handle):
|
||||
self._register_with_iocp(handle, CKeys.WAIT_OVERLAPPED)
|
||||
|
||||
@_public
|
||||
async def wait_overlapped(self, handle, lpOverlapped):
|
||||
handle = _handle(handle)
|
||||
if isinstance(lpOverlapped, int):
|
||||
lpOverlapped = ffi.cast("LPOVERLAPPED", lpOverlapped)
|
||||
if lpOverlapped in self._overlapped_waiters:
|
||||
raise _core.BusyResourceError(
|
||||
"another task is already waiting on that lpOverlapped"
|
||||
)
|
||||
task = _core.current_task()
|
||||
self._overlapped_waiters[lpOverlapped] = task
|
||||
raise_cancel = None
|
||||
|
||||
def abort(raise_cancel_):
|
||||
nonlocal raise_cancel
|
||||
raise_cancel = raise_cancel_
|
||||
try:
|
||||
_check(kernel32.CancelIoEx(handle, lpOverlapped))
|
||||
except OSError as exc:
|
||||
if exc.winerror == ErrorCodes.ERROR_NOT_FOUND:
|
||||
# Too late to cancel. If this happens because the
|
||||
# operation is already completed, we don't need to do
|
||||
# anything; we'll get a notification of that completion
|
||||
# soon. But another possibility is that the operation was
|
||||
# performed on a handle that wasn't registered with our
|
||||
# IOCP (ie, the user forgot to call register_with_iocp),
|
||||
# in which case we're just never going to see the
|
||||
# completion. To avoid an uncancellable infinite sleep in
|
||||
# the latter case, we'll PostQueuedCompletionStatus here,
|
||||
# and if our post arrives before the original completion
|
||||
# does, we'll assume the handle wasn't registered.
|
||||
_check(
|
||||
kernel32.PostQueuedCompletionStatus(
|
||||
self._iocp, 0, CKeys.LATE_CANCEL, lpOverlapped
|
||||
)
|
||||
)
|
||||
# Keep the lpOverlapped referenced so its address
|
||||
# doesn't get reused until our posted completion
|
||||
# status has been processed. Otherwise, we can
|
||||
# get confused about which completion goes with
|
||||
# which I/O.
|
||||
self._posted_too_late_to_cancel.add(lpOverlapped)
|
||||
else: # pragma: no cover
|
||||
raise _core.TrioInternalError(
|
||||
"CancelIoEx failed with unexpected error"
|
||||
) from exc
|
||||
return _core.Abort.FAILED
|
||||
|
||||
info = await _core.wait_task_rescheduled(abort)
|
||||
if lpOverlapped.Internal != 0:
|
||||
# the lpOverlapped reports the error as an NT status code,
|
||||
# which we must convert back to a Win32 error code before
|
||||
# it will produce the right sorts of exceptions
|
||||
code = ntdll.RtlNtStatusToDosError(lpOverlapped.Internal)
|
||||
if code == ErrorCodes.ERROR_OPERATION_ABORTED:
|
||||
if raise_cancel is not None:
|
||||
raise_cancel()
|
||||
else:
|
||||
# We didn't request this cancellation, so assume
|
||||
# it happened due to the underlying handle being
|
||||
# closed before the operation could complete.
|
||||
raise _core.ClosedResourceError("another task closed this resource")
|
||||
else:
|
||||
raise_winerror(code)
|
||||
return info
|
||||
|
||||
async def _perform_overlapped(self, handle, submit_fn):
|
||||
# submit_fn(lpOverlapped) submits some I/O
|
||||
# it may raise an OSError with ERROR_IO_PENDING
|
||||
# the handle must already be registered using
|
||||
# register_with_iocp(handle)
|
||||
# This always does a schedule point, but it's possible that the
|
||||
# operation will not be cancellable, depending on how Windows is
|
||||
# feeling today. So we need to check for cancellation manually.
|
||||
await _core.checkpoint_if_cancelled()
|
||||
lpOverlapped = ffi.new("LPOVERLAPPED")
|
||||
try:
|
||||
submit_fn(lpOverlapped)
|
||||
except OSError as exc:
|
||||
if exc.winerror != ErrorCodes.ERROR_IO_PENDING:
|
||||
raise
|
||||
await self.wait_overlapped(handle, lpOverlapped)
|
||||
return lpOverlapped
|
||||
|
||||
@_public
|
||||
async def write_overlapped(self, handle, data, file_offset=0):
|
||||
with ffi.from_buffer(data) as cbuf:
|
||||
|
||||
def submit_write(lpOverlapped):
|
||||
# yes, these are the real documented names
|
||||
offset_fields = lpOverlapped.DUMMYUNIONNAME.DUMMYSTRUCTNAME
|
||||
offset_fields.Offset = file_offset & 0xFFFFFFFF
|
||||
offset_fields.OffsetHigh = file_offset >> 32
|
||||
_check(
|
||||
kernel32.WriteFile(
|
||||
_handle(handle),
|
||||
ffi.cast("LPCVOID", cbuf),
|
||||
len(cbuf),
|
||||
ffi.NULL,
|
||||
lpOverlapped,
|
||||
)
|
||||
)
|
||||
|
||||
lpOverlapped = await self._perform_overlapped(handle, submit_write)
|
||||
# this is "number of bytes transferred"
|
||||
return lpOverlapped.InternalHigh
|
||||
|
||||
@_public
|
||||
async def readinto_overlapped(self, handle, buffer, file_offset=0):
|
||||
with ffi.from_buffer(buffer, require_writable=True) as cbuf:
|
||||
|
||||
def submit_read(lpOverlapped):
|
||||
offset_fields = lpOverlapped.DUMMYUNIONNAME.DUMMYSTRUCTNAME
|
||||
offset_fields.Offset = file_offset & 0xFFFFFFFF
|
||||
offset_fields.OffsetHigh = file_offset >> 32
|
||||
_check(
|
||||
kernel32.ReadFile(
|
||||
_handle(handle),
|
||||
ffi.cast("LPVOID", cbuf),
|
||||
len(cbuf),
|
||||
ffi.NULL,
|
||||
lpOverlapped,
|
||||
)
|
||||
)
|
||||
|
||||
lpOverlapped = await self._perform_overlapped(handle, submit_read)
|
||||
return lpOverlapped.InternalHigh
|
||||
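# Worked example of the Offset/OffsetHigh split used by write_overlapped and
# readinto_overlapped above: a 64-bit file offset is carried as two 32-bit
# halves inside the OVERLAPPED struct.
example_offset = 0x1_2345_6789
assert example_offset & 0xFFFFFFFF == 0x2345_6789   # low half  -> Offset
assert example_offset >> 32 == 0x1                  # high half -> OffsetHigh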
|
||||
################################################################
|
||||
# Raw IOCP operations
|
||||
################################################################
|
||||
|
||||
@_public
|
||||
def current_iocp(self):
|
||||
return int(ffi.cast("uintptr_t", self._iocp))
|
||||
|
||||
@contextmanager
|
||||
@_public
|
||||
def monitor_completion_key(self):
|
||||
key = next(self._completion_key_counter)
|
||||
queue = _core.UnboundedQueue()
|
||||
self._completion_key_queues[key] = queue
|
||||
try:
|
||||
yield (key, queue)
|
||||
finally:
|
||||
del self._completion_key_queues[key]
|
||||
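# Hedged usage sketch for the regular-overlapped path above (Windows only).
# It assumes `handle` is a Win32 handle that was opened with
# FILE_FLAG_OVERLAPPED -- obtaining such a handle is outside this sketch --
# and uses only the trio.lowlevel names defined in this module.
import trio

async def roundtrip(handle, data, offset=0):
    trio.lowlevel.register_with_iocp(handle)   # must precede any overlapped I/O
    n = await trio.lowlevel.write_overlapped(handle, data, file_offset=offset)
    buf = bytearray(n)
    await trio.lowlevel.readinto_overlapped(handle, buf, file_offset=offset)
    return bytes(buf)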
200
asq-env/lib/python3.9/site-packages/trio/_core/_ki.py
Normal file
@@ -0,0 +1,200 @@
|
||||
import inspect
|
||||
import signal
|
||||
import sys
|
||||
from functools import wraps
|
||||
import attr
|
||||
|
||||
import async_generator
|
||||
|
||||
from .._util import is_main_thread
|
||||
|
||||
if False:
|
||||
from typing import Any, TypeVar, Callable
|
||||
|
||||
F = TypeVar("F", bound=Callable[..., Any])
|
||||
|
||||
# In ordinary single-threaded Python code, when you hit control-C, it raises
|
||||
# an exception and automatically does all the regular unwinding stuff.
|
||||
#
|
||||
# In Trio code, we would like hitting control-C to raise an exception and
|
||||
# automatically do all the regular unwinding stuff. In particular, we would
|
||||
# like to maintain our invariant that all tasks always run to completion (one
|
||||
# way or another), by unwinding all of them.
|
||||
#
|
||||
# But it's basically impossible to write the core task running code in such a
|
||||
# way that it can maintain this invariant in the face of KeyboardInterrupt
|
||||
# exceptions arising at arbitrary bytecode positions. Similarly, if a
|
||||
# KeyboardInterrupt happened at the wrong moment inside pretty much any of our
|
||||
# inter-task synchronization or I/O primitives, then the system state could
|
||||
# get corrupted and prevent our being able to clean up properly.
|
||||
#
|
||||
# So, we need a way to defer KeyboardInterrupt processing from these critical
|
||||
# sections.
|
||||
#
|
||||
# Things that don't work:
|
||||
#
|
||||
# - Listen for SIGINT and process it in a system task: works fine for
|
||||
# well-behaved programs that regularly pass through the event loop, but if
|
||||
# user-code goes into an infinite loop then it can't be interrupted. Which
|
||||
# is unfortunate, since dealing with infinite loops is what
|
||||
# KeyboardInterrupt is for!
|
||||
#
|
||||
# - Use pthread_sigmask to disable signal delivery during critical section:
|
||||
# (a) windows has no pthread_sigmask, (b) python threads start with all
|
||||
# signals unblocked, so if there are any threads around they'll receive the
|
||||
# signal and then tell the main thread to run the handler, even if the main
|
||||
# thread has that signal blocked.
|
||||
#
|
||||
# - Install a signal handler which checks a global variable to decide whether
|
||||
# to raise the exception immediately (if we're in a non-critical section),
|
||||
# or to schedule it on the event loop (if we're in a critical section). The
|
||||
# problem here is that it's impossible to transition safely out of user code:
|
||||
#
|
||||
# with keyboard_interrupt_enabled:
|
||||
# msg = coro.send(value)
|
||||
#
|
||||
# If this raises a KeyboardInterrupt, it might be because the coroutine got
|
||||
# interrupted and has unwound... or it might be the KeyboardInterrupt
|
||||
# arrived just *after* 'send' returned, so the coroutine is still running
|
||||
# but we just lost the message it sent. (And worse, in our actual task
|
||||
# runner, the send is hidden inside a utility function etc.)
|
||||
#
|
||||
# Solution:
|
||||
#
|
||||
# Mark *stack frames* as being interrupt-safe or interrupt-unsafe, and from
|
||||
# the signal handler check which kind of frame we're currently in when
|
||||
# deciding whether to raise or schedule the exception.
|
||||
#
|
||||
# There are still some cases where this can fail, like if someone hits
|
||||
# control-C while the process is in the event loop, and then it immediately
|
||||
# enters an infinite loop in user code. In this case the user has to hit
|
||||
# control-C a second time. And of course if the user code is written so that
|
||||
# it doesn't actually exit after a task crashes and everything gets cancelled,
|
||||
# then there's not much to be done. (Hitting control-C repeatedly might help,
|
||||
# but in general the solution is to kill the process some other way, just like
|
||||
# for any Python program that's written to catch and ignore
|
||||
# KeyboardInterrupt.)
|
||||
|
||||
# We use this special string as a unique key into the frame locals dictionary.
|
||||
# The @ ensures it is not a valid identifier and can't clash with any possible
|
||||
# real local name. See: https://github.com/python-trio/trio/issues/469
|
||||
LOCALS_KEY_KI_PROTECTION_ENABLED = "@TRIO_KI_PROTECTION_ENABLED"
|
||||
|
||||
|
||||
# NB: according to the signal.signal docs, 'frame' can be None on entry to
|
||||
# this function:
|
||||
def ki_protection_enabled(frame):
|
||||
while frame is not None:
|
||||
if LOCALS_KEY_KI_PROTECTION_ENABLED in frame.f_locals:
|
||||
return frame.f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED]
|
||||
if frame.f_code.co_name == "__del__":
|
||||
return True
|
||||
frame = frame.f_back
|
||||
return True
|
||||
|
||||
|
||||
def currently_ki_protected():
|
||||
r"""Check whether the calling code has :exc:`KeyboardInterrupt` protection
|
||||
enabled.
|
||||
|
||||
It's surprisingly easy to think that one's :exc:`KeyboardInterrupt`
|
||||
protection is enabled when it isn't, or vice-versa. This function tells
|
||||
you what Trio thinks of the matter, which makes it useful for ``assert``\s
|
||||
and unit tests.
|
||||
|
||||
Returns:
|
||||
bool: True if protection is enabled, and False otherwise.
|
||||
|
||||
"""
|
||||
return ki_protection_enabled(sys._getframe())
|
||||
|
||||
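# Hedged usage sketch: the decorators built below are exposed as
# trio.lowlevel.enable_ki_protection / disable_ki_protection, and
# currently_ki_protected() is mainly useful in asserts and tests.
import trio

@trio.lowlevel.enable_ki_protection
async def critical_section():
    assert trio.lowlevel.currently_ki_protected()

async def ordinary_task_code():
    # User task code runs unprotected by default.
    assert not trio.lowlevel.currently_ki_protected()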
|
||||
def _ki_protection_decorator(enabled):
|
||||
def decorator(fn):
|
||||
# In some versions of Python, isgeneratorfunction returns true for
|
||||
# coroutine functions, so we have to check for coroutine functions
|
||||
# first.
|
||||
if inspect.iscoroutinefunction(fn):
|
||||
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
# See the comment for regular generators below
|
||||
coro = fn(*args, **kwargs)
|
||||
coro.cr_frame.f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED] = enabled
|
||||
return coro
|
||||
|
||||
return wrapper
|
||||
elif inspect.isgeneratorfunction(fn):
|
||||
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
# It's important that we inject this directly into the
|
||||
# generator's locals, as opposed to setting it here and then
|
||||
# doing 'yield from'. The reason is, if a generator is
|
||||
# throw()n into, then it may magically pop to the top of the
|
||||
# stack. And @contextmanager generators in particular are a
|
||||
# case where we often want KI protection, and which are often
|
||||
# thrown into! See:
|
||||
# https://bugs.python.org/issue29590
|
||||
gen = fn(*args, **kwargs)
|
||||
gen.gi_frame.f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED] = enabled
|
||||
return gen
|
||||
|
||||
return wrapper
|
||||
elif async_generator.isasyncgenfunction(fn):
|
||||
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
# See the comment for regular generators above
|
||||
agen = fn(*args, **kwargs)
|
||||
agen.ag_frame.f_locals[LOCALS_KEY_KI_PROTECTION_ENABLED] = enabled
|
||||
return agen
|
||||
|
||||
return wrapper
|
||||
else:
|
||||
|
||||
@wraps(fn)
|
||||
def wrapper(*args, **kwargs):
|
||||
locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = enabled
|
||||
return fn(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
enable_ki_protection = _ki_protection_decorator(True) # type: Callable[[F], F]
|
||||
enable_ki_protection.__name__ = "enable_ki_protection"
|
||||
|
||||
disable_ki_protection = _ki_protection_decorator(False) # type: Callable[[F], F]
|
||||
disable_ki_protection.__name__ = "disable_ki_protection"
|
||||
|
||||
|
||||
@attr.s
|
||||
class KIManager:
|
||||
handler = attr.ib(default=None)
|
||||
|
||||
def install(self, deliver_cb, restrict_keyboard_interrupt_to_checkpoints):
|
||||
assert self.handler is None
|
||||
if (
|
||||
not is_main_thread()
|
||||
or signal.getsignal(signal.SIGINT) != signal.default_int_handler
|
||||
):
|
||||
return
|
||||
|
||||
def handler(signum, frame):
|
||||
assert signum == signal.SIGINT
|
||||
protection_enabled = ki_protection_enabled(frame)
|
||||
if protection_enabled or restrict_keyboard_interrupt_to_checkpoints:
|
||||
deliver_cb()
|
||||
else:
|
||||
raise KeyboardInterrupt
|
||||
|
||||
self.handler = handler
|
||||
signal.signal(signal.SIGINT, handler)
|
||||
|
||||
def close(self):
|
||||
if self.handler is not None:
|
||||
if signal.getsignal(signal.SIGINT) is self.handler:
|
||||
signal.signal(signal.SIGINT, signal.default_int_handler)
|
||||
self.handler = None
|
||||
95
asq-env/lib/python3.9/site-packages/trio/_core/_local.py
Normal file
@@ -0,0 +1,95 @@
|
||||
# Runvar implementations
|
||||
import attr
|
||||
|
||||
from . import _run
|
||||
|
||||
from .._util import Final
|
||||
|
||||
|
||||
@attr.s(eq=False, hash=False, slots=True)
|
||||
class _RunVarToken:
|
||||
_no_value = object()
|
||||
|
||||
_var = attr.ib()
|
||||
previous_value = attr.ib(default=_no_value)
|
||||
redeemed = attr.ib(default=False, init=False)
|
||||
|
||||
@classmethod
|
||||
def empty(cls, var):
|
||||
return cls(var)
|
||||
|
||||
|
||||
@attr.s(eq=False, hash=False, slots=True)
|
||||
class RunVar(metaclass=Final):
|
||||
"""The run-local variant of a context variable.
|
||||
|
||||
:class:`RunVar` objects are similar to context variable objects,
|
||||
except that they are shared across a single call to :func:`trio.run`
|
||||
rather than a single task.
|
||||
|
||||
"""
|
||||
|
||||
_NO_DEFAULT = object()
|
||||
_name = attr.ib()
|
||||
_default = attr.ib(default=_NO_DEFAULT)
|
||||
|
||||
def get(self, default=_NO_DEFAULT):
|
||||
"""Gets the value of this :class:`RunVar` for the current run call."""
|
||||
try:
|
||||
return _run.GLOBAL_RUN_CONTEXT.runner._locals[self]
|
||||
except AttributeError:
|
||||
raise RuntimeError("Cannot be used outside of a run context") from None
|
||||
except KeyError:
|
||||
# contextvars consistency
|
||||
if default is not self._NO_DEFAULT:
|
||||
return default
|
||||
|
||||
if self._default is not self._NO_DEFAULT:
|
||||
return self._default
|
||||
|
||||
raise LookupError(self) from None
|
||||
|
||||
def set(self, value):
|
||||
"""Sets the value of this :class:`RunVar` for this current run
|
||||
call.
|
||||
|
||||
"""
|
||||
try:
|
||||
old_value = self.get()
|
||||
except LookupError:
|
||||
token = _RunVarToken.empty(self)
|
||||
else:
|
||||
token = _RunVarToken(self, old_value)
|
||||
|
||||
# This can't fail, because if we weren't in Trio context then the
|
||||
# get() above would have failed.
|
||||
_run.GLOBAL_RUN_CONTEXT.runner._locals[self] = value
|
||||
return token
|
||||
|
||||
def reset(self, token):
|
||||
"""Resets the value of this :class:`RunVar` to what it was
|
||||
previously specified by the token.
|
||||
|
||||
"""
|
||||
if token is None:
|
||||
raise TypeError("token must not be none")
|
||||
|
||||
if token.redeemed:
|
||||
raise ValueError("token has already been used")
|
||||
|
||||
if token._var is not self:
|
||||
raise ValueError("token is not for us")
|
||||
|
||||
previous = token.previous_value
|
||||
try:
|
||||
if previous is _RunVarToken._no_value:
|
||||
_run.GLOBAL_RUN_CONTEXT.runner._locals.pop(self)
|
||||
else:
|
||||
_run.GLOBAL_RUN_CONTEXT.runner._locals[self] = previous
|
||||
except AttributeError:
|
||||
raise RuntimeError("Cannot be used outside of a run context")
|
||||
|
||||
token.redeemed = True
|
||||
|
||||
def __repr__(self):
|
||||
return "<RunVar name={!r}>".format(self._name)
|
||||
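# Hedged usage sketch for RunVar (exposed as trio.lowlevel.RunVar): one value
# is shared by every task inside a single trio.run() call.
import trio

request_id = trio.lowlevel.RunVar("request_id", default=None)

async def main():
    token = request_id.set("abc123")
    assert request_id.get() == "abc123"
    request_id.reset(token)
    assert request_id.get() is None

trio.run(main)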
165
asq-env/lib/python3.9/site-packages/trio/_core/_mock_clock.py
Normal file
@@ -0,0 +1,165 @@
|
||||
import time
|
||||
from math import inf
|
||||
|
||||
from .. import _core
|
||||
from ._run import GLOBAL_RUN_CONTEXT
|
||||
from .._abc import Clock
|
||||
from .._util import Final
|
||||
|
||||
################################################################
|
||||
# The glorious MockClock
|
||||
################################################################
|
||||
|
||||
|
||||
# Prior art:
|
||||
# https://twistedmatrix.com/documents/current/api/twisted.internet.task.Clock.html
|
||||
# https://github.com/ztellman/manifold/issues/57
|
||||
class MockClock(Clock, metaclass=Final):
|
||||
"""A user-controllable clock suitable for writing tests.
|
||||
|
||||
Args:
|
||||
rate (float): the initial :attr:`rate`.
|
||||
autojump_threshold (float): the initial :attr:`autojump_threshold`.
|
||||
|
||||
.. attribute:: rate
|
||||
|
||||
How many seconds of clock time pass per second of real time. Default is
|
||||
0.0, i.e. the clock only advances through manual calls to :meth:`jump`
|
||||
or when the :attr:`autojump_threshold` is triggered. You can assign to
|
||||
this attribute to change it.
|
||||
|
||||
.. attribute:: autojump_threshold
|
||||
|
||||
The clock keeps an eye on the run loop, and if at any point it detects
|
||||
that all tasks have been blocked for this many real seconds (i.e.,
|
||||
according to the actual clock, not this clock), then the clock
|
||||
automatically jumps ahead to the run loop's next scheduled
|
||||
timeout. Default is :data:`math.inf`, i.e., to never autojump. You can
|
||||
assign to this attribute to change it.
|
||||
|
||||
Basically the idea is that if you have code or tests that use sleeps
|
||||
and timeouts, you can use this to make it run much faster, totally
|
||||
automatically. (At least, as long as those sleeps/timeouts are
|
||||
happening inside Trio; if your test involves talking to an external
|
||||
service and waiting for it to time out then obviously we can't help you
|
||||
there.)
|
||||
|
||||
You should set this to the smallest value that lets you reliably avoid
|
||||
"false alarms" where some I/O is in flight (e.g. between two halves of
|
||||
a socketpair) but the threshold gets triggered and time gets advanced
|
||||
anyway. This will depend on the details of your tests and test
|
||||
environment. If you aren't doing any I/O (like in our sleeping example
|
||||
above) then just set it to zero, and the clock will jump whenever all
|
||||
tasks are blocked.
|
||||
|
||||
.. note:: If you use ``autojump_threshold`` and
|
||||
`wait_all_tasks_blocked` at the same time, then you might wonder how
|
||||
they interact, since they both cause things to happen after the run
|
||||
loop goes idle for some time. The answer is:
|
||||
`wait_all_tasks_blocked` takes priority. If there's a task blocked
|
||||
in `wait_all_tasks_blocked`, then the autojump feature treats that
|
||||
as an active task and does *not* jump the clock.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, rate=0.0, autojump_threshold=inf):
|
||||
# when the real clock said 'real_base', the virtual time was
|
||||
# 'virtual_base', and since then it's advanced at 'rate' virtual
|
||||
# seconds per real second.
|
||||
self._real_base = 0.0
|
||||
self._virtual_base = 0.0
|
||||
self._rate = 0.0
|
||||
self._autojump_threshold = 0.0
|
||||
# kept as an attribute so that our tests can monkeypatch it
|
||||
self._real_clock = time.perf_counter
|
||||
|
||||
# use the property update logic to set initial values
|
||||
self.rate = rate
|
||||
self.autojump_threshold = autojump_threshold
|
||||
|
||||
def __repr__(self):
|
||||
return "<MockClock, time={:.7f}, rate={} @ {:#x}>".format(
|
||||
self.current_time(), self._rate, id(self)
|
||||
)
|
||||
|
||||
@property
|
||||
def rate(self):
|
||||
return self._rate
|
||||
|
||||
@rate.setter
|
||||
def rate(self, new_rate):
|
||||
if new_rate < 0:
|
||||
raise ValueError("rate must be >= 0")
|
||||
else:
|
||||
real = self._real_clock()
|
||||
virtual = self._real_to_virtual(real)
|
||||
self._virtual_base = virtual
|
||||
self._real_base = real
|
||||
self._rate = float(new_rate)
|
||||
|
||||
@property
|
||||
def autojump_threshold(self):
|
||||
return self._autojump_threshold
|
||||
|
||||
@autojump_threshold.setter
|
||||
def autojump_threshold(self, new_autojump_threshold):
|
||||
self._autojump_threshold = float(new_autojump_threshold)
|
||||
self._try_resync_autojump_threshold()
|
||||
|
||||
# runner.clock_autojump_threshold is an internal API that isn't easily
|
||||
# usable by custom third-party Clock objects. If you need access to this
|
||||
# functionality, let us know, and we'll figure out how to make a public
|
||||
# API. Discussion:
|
||||
#
|
||||
# https://github.com/python-trio/trio/issues/1587
|
||||
def _try_resync_autojump_threshold(self):
|
||||
try:
|
||||
runner = GLOBAL_RUN_CONTEXT.runner
|
||||
if runner.is_guest:
|
||||
runner.force_guest_tick_asap()
|
||||
except AttributeError:
|
||||
pass
|
||||
else:
|
||||
runner.clock_autojump_threshold = self._autojump_threshold
|
||||
|
||||
# Invoked by the run loop when runner.clock_autojump_threshold is
|
||||
# exceeded.
|
||||
def _autojump(self):
|
||||
statistics = _core.current_statistics()
|
||||
jump = statistics.seconds_to_next_deadline
|
||||
if 0 < jump < inf:
|
||||
self.jump(jump)
|
||||
|
||||
def _real_to_virtual(self, real):
|
||||
real_offset = real - self._real_base
|
||||
virtual_offset = self._rate * real_offset
|
||||
return self._virtual_base + virtual_offset
|
||||
|
||||
def start_clock(self):
|
||||
self._try_resync_autojump_threshold()
|
||||
|
||||
def current_time(self):
|
||||
return self._real_to_virtual(self._real_clock())
|
||||
|
||||
def deadline_to_sleep_time(self, deadline):
|
||||
virtual_timeout = deadline - self.current_time()
|
||||
if virtual_timeout <= 0:
|
||||
return 0
|
||||
elif self._rate > 0:
|
||||
return virtual_timeout / self._rate
|
||||
else:
|
||||
return 999999999
|
||||
|
||||
def jump(self, seconds):
|
||||
"""Manually advance the clock by the given number of seconds.
|
||||
|
||||
Args:
|
||||
seconds (float): the number of seconds to jump the clock forward.
|
||||
|
||||
Raises:
|
||||
ValueError: if you try to pass a negative value for ``seconds``.
|
||||
|
||||
"""
|
||||
if seconds < 0:
|
||||
raise ValueError("time can't go backwards")
|
||||
self._virtual_base += seconds
|
||||
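# Hedged usage sketch: MockClock is exposed as trio.testing.MockClock, and
# with autojump_threshold=0 sleeps resolve as soon as every task is blocked.
import trio
import trio.testing

async def main():
    await trio.sleep(3600)   # returns almost immediately under the mock clock

trio.run(main, clock=trio.testing.MockClock(autojump_threshold=0))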
516
asq-env/lib/python3.9/site-packages/trio/_core/_multierror.py
Normal file
@@ -0,0 +1,516 @@
|
||||
import sys
|
||||
import traceback
|
||||
import textwrap
|
||||
import warnings
|
||||
|
||||
import attr
|
||||
|
||||
# python traceback.TracebackException < 3.6.4 does not support unhashable exceptions
|
||||
# see https://github.com/python/cpython/pull/4014 for details
|
||||
if sys.version_info < (3, 6, 4):
|
||||
exc_key = lambda exc: exc
|
||||
else:
|
||||
exc_key = id
|
||||
|
||||
################################################################
|
||||
# MultiError
|
||||
################################################################
|
||||
|
||||
|
||||
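# Hedged usage sketch for the machinery below: MultiError.filter() applies a
# handler to each leaf exception and then simplifies the resulting tree, so
# filtering the ValueErrors out of this example leaves just the bare KeyError.
import trio

def drop_value_errors(exc):
    return None if isinstance(exc, ValueError) else exc

tree = trio.MultiError([ValueError("a"), trio.MultiError([KeyError("b"), ValueError("c")])])
remaining = trio.MultiError.filter(drop_value_errors, tree)
assert isinstance(remaining, KeyError)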
def _filter_impl(handler, root_exc):
|
||||
# We have a tree of MultiError's, like:
|
||||
#
|
||||
# MultiError([
|
||||
# ValueError,
|
||||
# MultiError([
|
||||
# KeyError,
|
||||
# ValueError,
|
||||
# ]),
|
||||
# ])
|
||||
#
|
||||
# or similar.
|
||||
#
|
||||
# We want to
|
||||
# 1) apply the filter to each of the leaf exceptions -- each leaf
|
||||
# might stay the same, be replaced (with the original exception
|
||||
# potentially sticking around as __context__ or __cause__), or
|
||||
# disappear altogether.
|
||||
# 2) simplify the resulting tree -- remove empty nodes, and replace
|
||||
# singleton MultiError's with their contents, e.g.:
|
||||
# MultiError([KeyError]) -> KeyError
|
||||
# (This can happen recursively, e.g. if the two ValueErrors above
|
||||
# get caught then we'll just be left with a bare KeyError.)
|
||||
# 3) preserve sensible tracebacks
|
||||
#
|
||||
# It's the tracebacks that are most confusing. As a MultiError
|
||||
# propagates through the stack, it accumulates traceback frames, but
|
||||
# the exceptions inside it don't. Semantically, the traceback for a
|
||||
# leaf exception is the concatenation of the tracebacks of all the
|
||||
# exceptions you see when traversing the exception tree from the root
|
||||
# to that leaf. Our correctness invariant is that this concatenated
|
||||
# traceback should be the same before and after.
|
||||
#
|
||||
# The easy way to do that would be to, at the beginning of this
|
||||
# function, "push" all tracebacks down to the leafs, so all the
|
||||
# MultiErrors have __traceback__=None, and all the leaves have complete
|
||||
# tracebacks. But whenever possible, we'd actually prefer to keep
|
||||
# tracebacks as high up in the tree as possible, because this lets us
|
||||
# keep only a single copy of the common parts of these exceptions'
|
||||
# tracebacks. This is cheaper (in memory + time -- tracebacks are
|
||||
# unpleasantly quadratic-ish to work with, and this might matter if
|
||||
# you have thousands of exceptions, which can happen e.g. after
|
||||
# cancelling a large task pool, and no-one will ever look at their
|
||||
# tracebacks!), and more importantly, factoring out redundant parts of
|
||||
# the tracebacks makes them more readable if/when users do see them.
|
||||
#
|
||||
# So instead our strategy is:
|
||||
# - first go through and construct the new tree, preserving any
|
||||
# unchanged subtrees
|
||||
# - then go through the original tree (!) and push tracebacks down
|
||||
# until either we hit a leaf, or we hit a subtree which was
|
||||
# preserved in the new tree.
|
||||
|
||||
# This used to also support async handler functions. But that runs into:
|
||||
# https://bugs.python.org/issue29600
|
||||
# which is difficult to fix on our end.
|
||||
|
||||
# Filters a subtree, ignoring tracebacks, while keeping a record of
|
||||
# which MultiErrors were preserved unchanged
|
||||
def filter_tree(exc, preserved):
|
||||
if isinstance(exc, MultiError):
|
||||
new_exceptions = []
|
||||
changed = False
|
||||
for child_exc in exc.exceptions:
|
||||
new_child_exc = filter_tree(child_exc, preserved)
|
||||
if new_child_exc is not child_exc:
|
||||
changed = True
|
||||
if new_child_exc is not None:
|
||||
new_exceptions.append(new_child_exc)
|
||||
if not new_exceptions:
|
||||
return None
|
||||
elif changed:
|
||||
return MultiError(new_exceptions)
|
||||
else:
|
||||
preserved.add(id(exc))
|
||||
return exc
|
||||
else:
|
||||
new_exc = handler(exc)
|
||||
# Our version of implicit exception chaining
|
||||
if new_exc is not None and new_exc is not exc:
|
||||
new_exc.__context__ = exc
|
||||
return new_exc
|
||||
|
||||
def push_tb_down(tb, exc, preserved):
|
||||
if id(exc) in preserved:
|
||||
return
|
||||
new_tb = concat_tb(tb, exc.__traceback__)
|
||||
if isinstance(exc, MultiError):
|
||||
for child_exc in exc.exceptions:
|
||||
push_tb_down(new_tb, child_exc, preserved)
|
||||
exc.__traceback__ = None
|
||||
else:
|
||||
exc.__traceback__ = new_tb
|
||||
|
||||
preserved = set()
|
||||
new_root_exc = filter_tree(root_exc, preserved)
|
||||
push_tb_down(None, root_exc, preserved)
|
||||
# Delete the local functions to avoid a reference cycle (see
|
||||
# test_simple_cancel_scope_usage_doesnt_create_cyclic_garbage)
|
||||
del filter_tree, push_tb_down
|
||||
return new_root_exc
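# Example of the filtering + simplification described above (illustration
# only; the names `tree` and `drop_value_errors` are hypothetical):
#
#     tree = MultiError([ValueError(), MultiError([KeyError(), ValueError()])])
#
#     def drop_value_errors(exc):
#         return None if isinstance(exc, ValueError) else exc
#
#     MultiError.filter(drop_value_errors, tree)
#     # -> the bare KeyError instance: both ValueErrors disappear, and the
#     #    surviving singleton MultiError([KeyError]) collapses to its child.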
|
||||
|
||||
|
||||
# Normally I'm a big fan of (a)contextmanager, but in this case I found it
|
||||
# easier to use the raw context manager protocol, because it makes it a lot
|
||||
# easier to reason about how we're mutating the traceback as we go. (End
|
||||
# result: if the exception gets modified, then the 'raise' here makes this
|
||||
# frame show up in the traceback; otherwise, we leave no trace.)
|
||||
@attr.s(frozen=True)
|
||||
class MultiErrorCatcher:
|
||||
_handler = attr.ib()
|
||||
|
||||
def __enter__(self):
|
||||
pass
|
||||
|
||||
def __exit__(self, etype, exc, tb):
|
||||
if exc is not None:
|
||||
filtered_exc = MultiError.filter(self._handler, exc)
|
||||
|
||||
if filtered_exc is exc:
|
||||
# Let the interpreter re-raise it
|
||||
return False
|
||||
if filtered_exc is None:
|
||||
# Swallow the exception
|
||||
return True
|
||||
# When we raise filtered_exc, Python will unconditionally blow
|
||||
# away its __context__ attribute and replace it with the original
|
||||
# exc we caught. So after we raise it, we have to pause it while
|
||||
# it's in flight to put the correct __context__ back.
|
||||
old_context = filtered_exc.__context__
|
||||
try:
|
||||
raise filtered_exc
|
||||
finally:
|
||||
_, value, _ = sys.exc_info()
|
||||
assert value is filtered_exc
|
||||
value.__context__ = old_context
|
||||
|
||||
|
||||
class MultiError(BaseException):
|
||||
"""An exception that contains other exceptions; also known as an
|
||||
"inception".
|
||||
|
||||
Its main use is to represent the situation when multiple child tasks all
|
||||
raise errors "in parallel".
|
||||
|
||||
Args:
|
||||
exceptions (list): The exceptions
|
||||
|
||||
Returns:
|
||||
If ``len(exceptions) == 1``, returns that exception. This means that a
|
||||
call to ``MultiError(...)`` is not guaranteed to return a
|
||||
:exc:`MultiError` object!
|
||||
|
||||
Otherwise, returns a new :exc:`MultiError` object.
|
||||
|
||||
Raises:
|
||||
TypeError: if any of the passed in objects are not instances of
|
||||
:exc:`BaseException`.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, exceptions):
|
||||
# Avoid recursion when exceptions[0] returned by __new__() happens
|
||||
# to be a MultiError and subsequently __init__() is called.
|
||||
if hasattr(self, "exceptions"):
|
||||
# __init__ was already called on this object
|
||||
assert len(exceptions) == 1 and exceptions[0] is self
|
||||
return
|
||||
self.exceptions = exceptions
|
||||
|
||||
def __new__(cls, exceptions):
|
||||
exceptions = list(exceptions)
|
||||
for exc in exceptions:
|
||||
if not isinstance(exc, BaseException):
|
||||
raise TypeError("Expected an exception object, not {!r}".format(exc))
|
||||
if len(exceptions) == 1:
|
||||
# If this lone object happens to itself be a MultiError, then
|
||||
# Python will implicitly call our __init__ on it again. See
|
||||
# special handling in __init__.
|
||||
return exceptions[0]
|
||||
else:
|
||||
# The base class __new__() implicitly invokes our __init__, which
|
||||
# is what we want.
|
||||
#
|
||||
# In an earlier version of the code, we didn't define __init__ and
|
||||
# simply set the `exceptions` attribute directly on the new object.
|
||||
# However, linters expect attributes to be initialized in __init__.
|
||||
return BaseException.__new__(cls, exceptions)
|
||||
|
||||
def __str__(self):
|
||||
return ", ".join(repr(exc) for exc in self.exceptions)
|
||||
|
||||
def __repr__(self):
|
||||
return "<MultiError: {}>".format(self)
|
||||
|
||||
@classmethod
|
||||
def filter(cls, handler, root_exc):
|
||||
"""Apply the given ``handler`` to all the exceptions in ``root_exc``.
|
||||
|
||||
Args:
|
||||
handler: A callable that takes an atomic (non-MultiError) exception
|
||||
as input, and returns either a new exception object or None.
|
||||
root_exc: An exception, often (though not necessarily) a
|
||||
:exc:`MultiError`.
|
||||
|
||||
Returns:
|
||||
A new exception object in which each component exception ``exc`` has
|
||||
been replaced by the result of running ``handler(exc)`` – or, if
|
||||
``handler`` returned None for all the inputs, returns None.
|
||||
|
||||
"""
|
||||
|
||||
return _filter_impl(handler, root_exc)
|
||||
|
||||
@classmethod
|
||||
def catch(cls, handler):
|
||||
"""Return a context manager that catches and re-throws exceptions
|
||||
after running :meth:`filter` on them.
|
||||
|
||||
Args:
|
||||
handler: as for :meth:`filter`
|
||||
|
||||
"""
|
||||
|
||||
return MultiErrorCatcher(handler)
|
||||
|
||||
|
||||
# Clean up exception printing:
|
||||
MultiError.__module__ = "trio"
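# Usage sketch for MultiError.catch (illustration only; `ignore_keyerrors` is
# a hypothetical handler). The handler sees each leaf exception and returns a
# replacement exception, or None to swallow it:
#
#     def ignore_keyerrors(exc):
#         if isinstance(exc, KeyError):
#             return None          # drop this leaf
#         return exc               # keep everything else unchanged
#
#     with MultiError.catch(ignore_keyerrors):
#         raise MultiError([KeyError("a"), ValueError("b")])
#     # Only the ValueError propagates -- and as a bare ValueError, since a
#     # single-element MultiError collapses to its sole child.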
|
||||
|
||||
################################################################
|
||||
# concat_tb
|
||||
################################################################
|
||||
|
||||
# We need to compute a new traceback that is the concatenation of two existing
|
||||
# tracebacks. This requires copying the entries in 'head' and then pointing
|
||||
# the final tb_next to 'tail'.
|
||||
#
|
||||
# NB: 'tail' might be None, which requires some special handling in the ctypes
|
||||
# version.
|
||||
#
|
||||
# The complication here is that Python doesn't actually support copying or
|
||||
# modifying traceback objects, so we have to get creative...
|
||||
#
|
||||
# On CPython, we use ctypes. On PyPy, we use "transparent proxies".
|
||||
#
|
||||
# Jinja2 is a useful source of inspiration:
|
||||
# https://github.com/pallets/jinja/blob/master/jinja2/debug.py
|
||||
|
||||
try:
|
||||
import tputil
|
||||
except ImportError:
|
||||
have_tproxy = False
|
||||
else:
|
||||
have_tproxy = True
|
||||
|
||||
if have_tproxy:
|
||||
# http://doc.pypy.org/en/latest/objspace-proxies.html
|
||||
def copy_tb(base_tb, tb_next):
|
||||
def controller(operation):
|
||||
# Rationale for pragma: I looked fairly carefully and tried a few
|
||||
# things, and AFAICT it's not actually possible to get any
|
||||
# 'opname' that isn't __getattr__ or __getattribute__. So there's
|
||||
# no missing test we could add, and no value in coverage nagging
|
||||
# us about adding one.
|
||||
if operation.opname in [
|
||||
"__getattribute__",
|
||||
"__getattr__",
|
||||
]: # pragma: no cover
|
||||
if operation.args[0] == "tb_next":
|
||||
return tb_next
|
||||
return operation.delegate()
|
||||
|
||||
return tputil.make_proxy(controller, type(base_tb), base_tb)
|
||||
|
||||
|
||||
else:
|
||||
# ctypes it is
|
||||
import ctypes
|
||||
|
||||
# How to handle refcounting? I don't want to use ctypes.py_object because
|
||||
# I don't understand or trust it, and I don't want to use
|
||||
# ctypes.pythonapi.Py_{Inc,Dec}Ref because we might clash with user code
|
||||
# that also tries to use them but with different types. So private _ctypes
|
||||
# APIs it is!
|
||||
import _ctypes
|
||||
|
||||
class CTraceback(ctypes.Structure):
|
||||
_fields_ = [
|
||||
("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
|
||||
("tb_next", ctypes.c_void_p),
|
||||
("tb_frame", ctypes.c_void_p),
|
||||
("tb_lasti", ctypes.c_int),
|
||||
("tb_lineno", ctypes.c_int),
|
||||
]
|
||||
|
||||
def copy_tb(base_tb, tb_next):
|
||||
# TracebackType has no public constructor, so allocate one the hard way
|
||||
try:
|
||||
raise ValueError
|
||||
except ValueError as exc:
|
||||
new_tb = exc.__traceback__
|
||||
c_new_tb = CTraceback.from_address(id(new_tb))
|
||||
|
||||
# At the C level, tb_next either points to the next traceback or is
|
||||
# NULL. c_void_p and the .tb_next accessor both convert NULL to None,
|
||||
# but we shouldn't DECREF None just because we assigned to a NULL
|
||||
# pointer! Here we know that our new traceback has only 1 frame in it,
|
||||
# so we can assume the tb_next field is NULL.
|
||||
assert c_new_tb.tb_next is None
|
||||
# If tb_next is None, then we want to set c_new_tb.tb_next to NULL,
|
||||
# which it already is, so we're done. Otherwise, we have to actually
|
||||
# do some work:
|
||||
if tb_next is not None:
|
||||
_ctypes.Py_INCREF(tb_next)
|
||||
c_new_tb.tb_next = id(tb_next)
|
||||
|
||||
assert c_new_tb.tb_frame is not None
|
||||
_ctypes.Py_INCREF(base_tb.tb_frame)
|
||||
old_tb_frame = new_tb.tb_frame
|
||||
c_new_tb.tb_frame = id(base_tb.tb_frame)
|
||||
_ctypes.Py_DECREF(old_tb_frame)
|
||||
|
||||
c_new_tb.tb_lasti = base_tb.tb_lasti
|
||||
c_new_tb.tb_lineno = base_tb.tb_lineno
|
||||
|
||||
return new_tb
|
||||
|
||||
|
||||
def concat_tb(head, tail):
|
||||
# We have to use an iterative algorithm here, because in the worst case
|
||||
# this might be a RecursionError stack that is by definition too deep to
|
||||
# process by recursion!
|
||||
head_tbs = []
|
||||
pointer = head
|
||||
while pointer is not None:
|
||||
head_tbs.append(pointer)
|
||||
pointer = pointer.tb_next
|
||||
current_head = tail
|
||||
for head_tb in reversed(head_tbs):
|
||||
current_head = copy_tb(head_tb, tb_next=current_head)
|
||||
return current_head
|
||||
|
||||
|
||||
################################################################
|
||||
# MultiError traceback formatting
|
||||
#
|
||||
# What follows is terrible, terrible monkey patching of
|
||||
# traceback.TracebackException to add support for handling
|
||||
# MultiErrors
|
||||
################################################################
|
||||
|
||||
traceback_exception_original_init = traceback.TracebackException.__init__
|
||||
|
||||
|
||||
def traceback_exception_init(
|
||||
self,
|
||||
exc_type,
|
||||
exc_value,
|
||||
exc_traceback,
|
||||
*,
|
||||
limit=None,
|
||||
lookup_lines=True,
|
||||
capture_locals=False,
|
||||
compact=False,
|
||||
_seen=None,
|
||||
):
|
||||
if sys.version_info >= (3, 10):
|
||||
kwargs = {"compact": compact}
|
||||
else:
|
||||
kwargs = {}
|
||||
|
||||
# Capture the original exception and its cause and context as TracebackExceptions
|
||||
traceback_exception_original_init(
|
||||
self,
|
||||
exc_type,
|
||||
exc_value,
|
||||
exc_traceback,
|
||||
limit=limit,
|
||||
lookup_lines=lookup_lines,
|
||||
capture_locals=capture_locals,
|
||||
_seen=_seen,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
seen_was_none = _seen is None
|
||||
|
||||
if _seen is None:
|
||||
_seen = set()
|
||||
|
||||
# Capture each of the exceptions in the MultiError along with each of their causes and contexts
|
||||
if isinstance(exc_value, MultiError):
|
||||
embedded = []
|
||||
for exc in exc_value.exceptions:
|
||||
if exc_key(exc) not in _seen:
|
||||
embedded.append(
|
||||
traceback.TracebackException.from_exception(
|
||||
exc,
|
||||
limit=limit,
|
||||
lookup_lines=lookup_lines,
|
||||
capture_locals=capture_locals,
|
||||
# copy the set of _seen exceptions so that duplicates
|
||||
# shared between sub-exceptions are not omitted
|
||||
_seen=None if seen_was_none else set(_seen),
|
||||
)
|
||||
)
|
||||
self.embedded = embedded
|
||||
else:
|
||||
self.embedded = []
|
||||
|
||||
|
||||
traceback.TracebackException.__init__ = traceback_exception_init # type: ignore
|
||||
traceback_exception_original_format = traceback.TracebackException.format
|
||||
|
||||
|
||||
def traceback_exception_format(self, *, chain=True):
|
||||
yield from traceback_exception_original_format(self, chain=chain)
|
||||
|
||||
for i, exc in enumerate(self.embedded):
|
||||
yield "\nDetails of embedded exception {}:\n\n".format(i + 1)
|
||||
yield from (textwrap.indent(line, " " * 2) for line in exc.format(chain=chain))
|
||||
|
||||
|
||||
traceback.TracebackException.format = traceback_exception_format # type: ignore
|
||||
|
||||
|
||||
def trio_excepthook(etype, value, tb):
|
||||
for chunk in traceback.format_exception(etype, value, tb):
|
||||
sys.stderr.write(chunk)
|
||||
|
||||
|
||||
monkeypatched_or_warned = False
|
||||
|
||||
if "IPython" in sys.modules:
|
||||
import IPython
|
||||
|
||||
ip = IPython.get_ipython()
|
||||
if ip is not None:
|
||||
if ip.custom_exceptions != ():
|
||||
warnings.warn(
|
||||
"IPython detected, but you already have a custom exception "
|
||||
"handler installed. I'll skip installing Trio's custom "
|
||||
"handler, but this means MultiErrors will not show full "
|
||||
"tracebacks.",
|
||||
category=RuntimeWarning,
|
||||
)
|
||||
monkeypatched_or_warned = True
|
||||
else:
|
||||
|
||||
def trio_show_traceback(self, etype, value, tb, tb_offset=None):
|
||||
# XX it would be better to integrate with IPython's fancy
|
||||
# exception formatting stuff (and not ignore tb_offset)
|
||||
trio_excepthook(etype, value, tb)
|
||||
|
||||
ip.set_custom_exc((MultiError,), trio_show_traceback)
|
||||
monkeypatched_or_warned = True
|
||||
|
||||
if sys.excepthook is sys.__excepthook__:
|
||||
sys.excepthook = trio_excepthook
|
||||
monkeypatched_or_warned = True
|
||||
|
||||
# Ubuntu's system Python has a sitecustomize.py file that imports
|
||||
# apport_python_hook and replaces sys.excepthook.
|
||||
#
|
||||
# The custom hook captures the error for crash reporting, and then calls
|
||||
# sys.__excepthook__ to actually print the error.
|
||||
#
|
||||
# We don't mind it capturing the error for crash reporting, but we want to
|
||||
# take over printing the error. So we monkeypatch the apport_python_hook
|
||||
# module so that instead of calling sys.__excepthook__, it calls our custom
|
||||
# hook.
|
||||
#
|
||||
# More details: https://github.com/python-trio/trio/issues/1065
|
||||
if getattr(sys.excepthook, "__name__", None) == "apport_excepthook":
|
||||
import apport_python_hook
|
||||
|
||||
assert sys.excepthook is apport_python_hook.apport_excepthook
|
||||
|
||||
# Give it a descriptive name as a hint for anyone who's stuck trying to
|
||||
# debug this mess later.
|
||||
class TrioFakeSysModuleForApport:
|
||||
pass
|
||||
|
||||
fake_sys = TrioFakeSysModuleForApport()
|
||||
fake_sys.__dict__.update(sys.__dict__)
|
||||
fake_sys.__excepthook__ = trio_excepthook # type: ignore
|
||||
apport_python_hook.sys = fake_sys
|
||||
|
||||
monkeypatched_or_warned = True
|
||||
|
||||
if not monkeypatched_or_warned:
|
||||
warnings.warn(
|
||||
"You seem to already have a custom sys.excepthook handler "
|
||||
"installed. I'll skip installing Trio's custom handler, but this "
|
||||
"means MultiErrors will not show full tracebacks.",
|
||||
category=RuntimeWarning,
|
||||
)
|
||||
218
asq-env/lib/python3.9/site-packages/trio/_core/_parking_lot.py
Normal file
@@ -0,0 +1,218 @@
|
||||
# ParkingLot provides an abstraction for a fair waitqueue with cancellation
|
||||
# and requeueing support. Inspiration:
|
||||
#
|
||||
# https://webkit.org/blog/6161/locking-in-webkit/
|
||||
# https://amanieu.github.io/parking_lot/
|
||||
#
|
||||
# which were in turn heavily influenced by
|
||||
#
|
||||
# http://gee.cs.oswego.edu/dl/papers/aqs.pdf
|
||||
#
|
||||
# Compared to these, our use of cooperative scheduling allows some
|
||||
# simplifications (no need for internal locking). On the other hand, the need
|
||||
# to support Trio's strong cancellation semantics adds some complications
|
||||
# (tasks need to know where they're queued so they can cancel). Also, in the
|
||||
# above work, the ParkingLot is a global structure that holds a collection of
|
||||
# waitqueues keyed by lock address, and which are opportunistically allocated
|
||||
# and destroyed as contention arises; this allows the worst-case memory usage
|
||||
# for all waitqueues to be O(#tasks). Here we allocate a separate wait queue
|
||||
# for each synchronization object, so we're O(#objects + #tasks). This isn't
|
||||
# *so* bad since our synchronization objects are heavier than
|
||||
# theirs and our tasks are lighter, so for us #objects is smaller and #tasks
|
||||
# is larger.
|
||||
#
|
||||
# This is in the core for two reasons. First, it's used by
|
||||
# UnboundedQueue, and UnboundedQueue is used for a number of things in the
|
||||
# core. And second, it's responsible for providing fairness to all of our
|
||||
# high-level synchronization primitives (locks, queues, etc.). For now with
|
||||
# our FIFO scheduler this is relatively trivial (it's just a FIFO waitqueue),
|
||||
# but if in the future we ever start supporting task priorities or fair scheduling
|
||||
#
|
||||
# https://github.com/python-trio/trio/issues/32
|
||||
#
|
||||
# then all we'll have to do is update this. (Well, full-fledged task
|
||||
# priorities might also require priority inheritance, which would require more
|
||||
# work.)
|
||||
#
|
||||
# For discussion of data structures to use here, see:
|
||||
#
|
||||
# https://github.com/dabeaz/curio/issues/136
|
||||
#
|
||||
# (and also the articles above). Currently we use a SortedDict ordered by a
|
||||
# global monotonic counter that ensures FIFO ordering. The main advantage of
|
||||
# this is that it's easy to implement :-). An intrusive doubly-linked list
|
||||
# would also be a natural approach, so long as we only handle FIFO ordering.
|
||||
#
|
||||
# XX: should we switch to the shared global ParkingLot approach?
|
||||
#
|
||||
# XX: we should probably add support for "parking tokens" to allow for
|
||||
# task-fair RWlock (basically: when parking a task needs to be able to mark
|
||||
# itself as a reader or a writer, and then a task-fair wakeup policy is, wake
|
||||
# the next task, and if it's a reader then keep waking tasks so long as they
|
||||
# are readers). Without this I think you can implement write-biased or
|
||||
# read-biased RWlocks (by using two parking lots and drawing from whichever is
|
||||
# preferred), but not task-fair -- and task-fair plays much more nicely with
|
||||
# WFQ. (Consider what happens in the two-lot implementation if you're
|
||||
# write-biased but all the pending writers are blocked at the scheduler level
|
||||
# by the WFQ logic...)
|
||||
# ...alternatively, "phase-fair" RWlocks are pretty interesting:
|
||||
# http://www.cs.unc.edu/~anderson/papers/ecrts09b.pdf
|
||||
# Useful summary:
|
||||
# https://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReadWriteLock.html
|
||||
#
|
||||
# XX: if we do add WFQ, then we might have to drop the current feature where
|
||||
# unpark returns the tasks that were unparked. Rationale: suppose that at the
|
||||
# time we call unpark, the next task is deprioritized... and then, before it
|
||||
# becomes runnable, a new task parks which *is* runnable. Ideally we should
|
||||
# immediately wake the new task, and leave the old task on the queue for
|
||||
# later. But this means we can't commit to which task we are unparking when
|
||||
# unpark is called.
|
||||
#
|
||||
# See: https://github.com/python-trio/trio/issues/53
|
||||
|
||||
from itertools import count
|
||||
import attr
|
||||
from collections import OrderedDict
|
||||
|
||||
from .. import _core
|
||||
from .._util import Final
|
||||
|
||||
_counter = count()
|
||||
|
||||
|
||||
@attr.s(frozen=True, slots=True)
|
||||
class _ParkingLotStatistics:
|
||||
tasks_waiting = attr.ib()
|
||||
|
||||
|
||||
@attr.s(eq=False, hash=False, slots=True)
|
||||
class ParkingLot(metaclass=Final):
|
||||
"""A fair wait queue with cancellation and requeueing.
|
||||
|
||||
This class encapsulates the tricky parts of implementing a wait
|
||||
queue. It's useful for implementing higher-level synchronization
|
||||
primitives like queues and locks.
|
||||
|
||||
In addition to the methods below, you can use ``len(parking_lot)`` to get
|
||||
the number of parked tasks, and ``if parking_lot: ...`` to check whether
|
||||
there are any parked tasks.
|
||||
|
||||
"""
|
||||
|
||||
# {task: None}, we just want a deque where we can quickly delete random
|
||||
# items
|
||||
_parked = attr.ib(factory=OrderedDict, init=False)
|
||||
|
||||
def __len__(self):
|
||||
"""Returns the number of parked tasks."""
|
||||
return len(self._parked)
|
||||
|
||||
def __bool__(self):
|
||||
"""True if there are parked tasks, False otherwise."""
|
||||
return bool(self._parked)
|
||||
|
||||
# XX this currently returns None
|
||||
# if we ever add the ability to repark while keeping one's place in
|
||||
# line (for false wakeups), then we could have it return a ticket that
|
||||
# abstracts the "place in line" concept.
|
||||
@_core.enable_ki_protection
|
||||
async def park(self):
|
||||
"""Park the current task until woken by a call to :meth:`unpark` or
|
||||
:meth:`unpark_all`.
|
||||
|
||||
"""
|
||||
task = _core.current_task()
|
||||
self._parked[task] = None
|
||||
task.custom_sleep_data = self
|
||||
|
||||
def abort_fn(_):
|
||||
del task.custom_sleep_data._parked[task]
|
||||
return _core.Abort.SUCCEEDED
|
||||
|
||||
await _core.wait_task_rescheduled(abort_fn)
|
||||
|
||||
def _pop_several(self, count):
|
||||
for _ in range(min(count, len(self._parked))):
|
||||
task, _ = self._parked.popitem(last=False)
|
||||
yield task
|
||||
|
||||
@_core.enable_ki_protection
|
||||
def unpark(self, *, count=1):
|
||||
"""Unpark one or more tasks.
|
||||
|
||||
This wakes up ``count`` tasks that are blocked in :meth:`park`. If
|
||||
there are fewer than ``count`` tasks parked, then wakes as many tasks as
|
||||
are available and then returns successfully.
|
||||
|
||||
Args:
|
||||
count (int): the number of tasks to unpark.
|
||||
|
||||
"""
|
||||
tasks = list(self._pop_several(count))
|
||||
for task in tasks:
|
||||
_core.reschedule(task)
|
||||
return tasks
|
||||
|
||||
def unpark_all(self):
|
||||
"""Unpark all parked tasks."""
|
||||
return self.unpark(count=len(self))
|
||||
|
||||
@_core.enable_ki_protection
|
||||
def repark(self, new_lot, *, count=1):
|
||||
"""Move parked tasks from one :class:`ParkingLot` object to another.
|
||||
|
||||
This dequeues ``count`` tasks from one lot, and requeues them on
|
||||
another, preserving order. For example::
|
||||
|
||||
async def parker(lot):
|
||||
print("sleeping")
|
||||
await lot.park()
|
||||
print("woken")
|
||||
|
||||
async def main():
|
||||
lot1 = trio.lowlevel.ParkingLot()
|
||||
lot2 = trio.lowlevel.ParkingLot()
|
||||
async with trio.open_nursery() as nursery:
|
||||
nursery.start_soon(parker, lot1)
|
||||
await trio.testing.wait_all_tasks_blocked()
|
||||
assert len(lot1) == 1
|
||||
assert len(lot2) == 0
|
||||
lot1.repark(lot2)
|
||||
assert len(lot1) == 0
|
||||
assert len(lot2) == 1
|
||||
# This wakes up the task that was originally parked in lot1
|
||||
lot2.unpark()
|
||||
|
||||
If there are fewer than ``count`` tasks parked, then reparks as many
|
||||
tasks as are available and then returns successfully.
|
||||
|
||||
Args:
|
||||
new_lot (ParkingLot): the parking lot to move tasks to.
|
||||
count (int): the number of tasks to move.
|
||||
|
||||
"""
|
||||
if not isinstance(new_lot, ParkingLot):
|
||||
raise TypeError("new_lot must be a ParkingLot")
|
||||
for task in self._pop_several(count):
|
||||
new_lot._parked[task] = None
|
||||
task.custom_sleep_data = new_lot
|
||||
|
||||
def repark_all(self, new_lot):
|
||||
"""Move all parked tasks from one :class:`ParkingLot` object to
|
||||
another.
|
||||
|
||||
See :meth:`repark` for details.
|
||||
|
||||
"""
|
||||
return self.repark(new_lot, count=len(self))
|
||||
|
||||
def statistics(self):
|
||||
"""Return an object containing debugging information.
|
||||
|
||||
Currently the following fields are defined:
|
||||
|
||||
* ``tasks_waiting``: The number of tasks blocked on this lot's
|
||||
:meth:`park` method.
|
||||
|
||||
"""
|
||||
return _ParkingLotStatistics(tasks_waiting=len(self._parked))
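# Illustration (not part of the vendored module): a minimal lock-like
# primitive sketched on top of ParkingLot, in the spirit of the "higher-level
# synchronization primitives" mentioned in the class docstring. The name
# _MiniLock is hypothetical, and this skips the checkpoint and cancellation
# handling a production lock would need:
#
#     class _MiniLock:
#         def __init__(self):
#             self._lot = ParkingLot()
#             self._held = False
#
#         async def acquire(self):
#             if self._held:
#                 # release() hands ownership directly to the task it wakes,
#                 # so when park() returns we already hold the lock.
#                 await self._lot.park()
#             else:
#                 self._held = True
#
#         def release(self):
#             if self._lot:
#                 self._lot.unpark(count=1)   # pass the lock to the next waiter
#             else:
#                 self._held = False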
|
||||
2379
asq-env/lib/python3.9/site-packages/trio/_core/_run.py
Normal file
File diff suppressed because it is too large
171
asq-env/lib/python3.9/site-packages/trio/_core/_thread_cache.py
Normal file
@@ -0,0 +1,171 @@
|
||||
from threading import Thread, Lock
|
||||
import outcome
|
||||
from itertools import count
|
||||
|
||||
# The "thread cache" is a simple unbounded thread pool, i.e., it automatically
|
||||
# spawns as many threads as needed to handle all the requests it's given. Its
|
||||
# only purpose is to cache worker threads so that they don't have to be
|
||||
# started from scratch every time we want to delegate some work to a thread.
|
||||
# It's expected that some higher-level code will track how many threads are in
|
||||
# use to avoid overwhelming the system (e.g. the limiter= argument to
|
||||
# trio.to_thread.run_sync).
|
||||
#
|
||||
# To maximize sharing, there's only one thread cache per process, even if you
|
||||
# have multiple calls to trio.run.
|
||||
#
|
||||
# Guarantees:
|
||||
#
|
||||
# It's safe to call start_thread_soon simultaneously from
|
||||
# multiple threads.
|
||||
#
|
||||
# Idle threads are chosen in LIFO order, i.e. we *don't* spread work evenly
|
||||
# over all threads. Instead we try to let some threads do most of the work
|
||||
# while others sit idle as much as possible. Compared to FIFO, this has better
|
||||
# memory cache behavior, and it makes it easier to detect when we have too
|
||||
# many threads, so idle ones can exit.
|
||||
#
|
||||
# This code assumes that 'dict' has the following properties:
|
||||
#
|
||||
# - __setitem__, __delitem__, and popitem are all thread-safe and atomic with
|
||||
# respect to each other. This is guaranteed by the GIL.
|
||||
#
|
||||
# - popitem returns the most-recently-added item (i.e., __setitem__ + popitem
|
||||
# give you a LIFO queue). This relies on dicts being insertion-ordered, like
|
||||
# they are in py36+.
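# For illustration, the LIFO behavior relied on here (guaranteed on the
# py36+ interpreters mentioned above, where dicts are insertion-ordered):
#
#     d = {}
#     d["a"] = None
#     d["b"] = None
#     assert d.popitem() == ("b", None)   # most recently added comes out first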
|
||||
|
||||
# How long a thread will idle waiting for new work before it gives up and exits.
|
||||
# This value is pretty arbitrary; I don't think it matters too much.
|
||||
IDLE_TIMEOUT = 10 # seconds
|
||||
|
||||
name_counter = count()
|
||||
|
||||
|
||||
class WorkerThread:
|
||||
def __init__(self, thread_cache):
|
||||
self._job = None
|
||||
self._thread_cache = thread_cache
|
||||
# This Lock is used in an unconventional way.
|
||||
#
|
||||
# "Unlocked" means we have a pending job that's been assigned to us;
|
||||
# "locked" means that we don't.
|
||||
#
|
||||
# Initially we have no job, so it starts out in locked state.
|
||||
self._worker_lock = Lock()
|
||||
self._worker_lock.acquire()
|
||||
thread = Thread(target=self._work, daemon=True)
|
||||
thread.name = f"Trio worker thread {next(name_counter)}"
|
||||
thread.start()
|
||||
|
||||
def _handle_job(self):
|
||||
# Handle job in a separate method to ensure user-created
|
||||
# objects are cleaned up in a consistent manner.
|
||||
fn, deliver = self._job
|
||||
self._job = None
|
||||
result = outcome.capture(fn)
|
||||
# Tell the cache that we're available to be assigned a new
|
||||
# job. We do this *before* calling 'deliver', so that if
|
||||
# 'deliver' triggers a new job, it can be assigned to us
|
||||
# instead of spawning a new thread.
|
||||
self._thread_cache._idle_workers[self] = None
|
||||
deliver(result)
|
||||
|
||||
def _work(self):
|
||||
while True:
|
||||
if self._worker_lock.acquire(timeout=IDLE_TIMEOUT):
|
||||
# We got a job
|
||||
self._handle_job()
|
||||
else:
|
||||
# Timeout acquiring lock, so we can probably exit. But,
|
||||
# there's a race condition: we might be assigned a job *just*
|
||||
# as we're about to exit. So we have to check.
|
||||
try:
|
||||
del self._thread_cache._idle_workers[self]
|
||||
except KeyError:
|
||||
# Someone else removed us from the idle worker queue, so
|
||||
# they must be in the process of assigning us a job - loop
|
||||
# around and wait for it.
|
||||
continue
|
||||
else:
|
||||
# We successfully removed ourselves from the idle
|
||||
# worker queue, so no more jobs are incoming; it's safe to
|
||||
# exit.
|
||||
return
|
||||
|
||||
|
||||
class ThreadCache:
|
||||
def __init__(self):
|
||||
self._idle_workers = {}
|
||||
|
||||
def start_thread_soon(self, fn, deliver):
|
||||
try:
|
||||
worker, _ = self._idle_workers.popitem()
|
||||
except KeyError:
|
||||
worker = WorkerThread(self)
|
||||
worker._job = (fn, deliver)
|
||||
worker._worker_lock.release()
|
||||
|
||||
|
||||
THREAD_CACHE = ThreadCache()
|
||||
|
||||
|
||||
def start_thread_soon(fn, deliver):
|
||||
"""Runs ``deliver(outcome.capture(fn))`` in a worker thread.
|
||||
|
||||
Generally ``fn`` does some blocking work, and ``deliver`` delivers the
|
||||
result back to whoever is interested.
|
||||
|
||||
This is a low-level, no-frills interface, very similar to using
|
||||
`threading.Thread` to spawn a thread directly. The main difference is
|
||||
that this function tries to re-use threads when possible, so it can be
|
||||
a bit faster than `threading.Thread`.
|
||||
|
||||
Worker threads have the `~threading.Thread.daemon` flag set, which means
|
||||
that if your main thread exits, worker threads will automatically be
|
||||
killed. If you want to make sure that your ``fn`` runs to completion, then
|
||||
you should make sure that the main thread remains alive until ``deliver``
|
||||
is called.
|
||||
|
||||
It is safe to call this function simultaneously from multiple threads.
|
||||
|
||||
Args:
|
||||
|
||||
fn (sync function): Performs arbitrary blocking work.
|
||||
|
||||
deliver (sync function): Takes the `outcome.Outcome` of ``fn``, and
|
||||
delivers it. *Must not block.*
|
||||
|
||||
Because worker threads are cached and reused for multiple calls, neither
|
||||
function should mutate thread-level state, like `threading.local` objects
|
||||
– or if they do, they should be careful to revert their changes before
|
||||
returning.
|
||||
|
||||
Note:
|
||||
|
||||
The split between ``fn`` and ``deliver`` serves two purposes. First,
|
||||
it's convenient, since most callers need something like this anyway.
|
||||
|
||||
Second, it avoids a small race condition that could cause too many
|
||||
threads to be spawned. Consider a program that wants to run several
|
||||
jobs sequentially on a thread, so the main thread submits a job, waits
|
||||
for it to finish, submits another job, etc. In theory, this program
|
||||
should only need one worker thread. But what could happen is:
|
||||
|
||||
1. Worker thread: First job finishes, and calls ``deliver``.
|
||||
|
||||
2. Main thread: receives notification that the job finished, and calls
|
||||
``start_thread_soon``.
|
||||
|
||||
3. Main thread: sees that no worker threads are marked idle, so spawns
|
||||
a second worker thread.
|
||||
|
||||
4. Original worker thread: marks itself as idle.
|
||||
|
||||
To avoid this, threads mark themselves as idle *before* calling
|
||||
``deliver``.
|
||||
|
||||
Is this potential extra thread a major problem? Maybe not, but it's
|
||||
easy enough to avoid, and we figure that if the user is trying to
|
||||
limit how many threads they're using then it's polite to respect that.
|
||||
|
||||
"""
|
||||
THREAD_CACHE.start_thread_soon(fn, deliver)
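# Usage sketch (illustration only; `blocking_work`, `deliver`, and the Event
# plumbing are hypothetical). `deliver` runs in the worker thread and receives
# an outcome.Outcome wrapping whatever `fn` returned or raised:
#
#     import threading
#
#     done = threading.Event()
#     result_box = []
#
#     def blocking_work():
#         return 2 + 2              # stands in for some blocking call
#
#     def deliver(result):          # must not block
#         result_box.append(result)
#         done.set()
#
#     start_thread_soon(blocking_work, deliver)
#     done.wait()
#     assert result_box[0].unwrap() == 4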
|
||||
270
asq-env/lib/python3.9/site-packages/trio/_core/_traps.py
Normal file
@@ -0,0 +1,270 @@
|
||||
# These are the only functions that ever yield back to the task runner.
|
||||
|
||||
import types
|
||||
import enum
|
||||
|
||||
import attr
|
||||
import outcome
|
||||
|
||||
from . import _run
|
||||
|
||||
|
||||
# Helper for the bottommost 'yield'. You can't use 'yield' inside an async
|
||||
# function, but you can inside a generator, and if you decorate your generator
|
||||
# with @types.coroutine, then it's even awaitable. However, it's still not a
|
||||
# real async function: in particular, it isn't recognized by
|
||||
# inspect.iscoroutinefunction, and it doesn't trigger the unawaited coroutine
|
||||
# tracking machinery. Since our traps are public APIs, we make them real async
|
||||
# functions, and then this helper takes care of the actual yield:
|
||||
@types.coroutine
|
||||
def _async_yield(obj):
|
||||
return (yield obj)
|
||||
|
||||
|
||||
# This class object is used as a singleton.
|
||||
# Not exported in the trio._core namespace, but imported directly by _run.
|
||||
class CancelShieldedCheckpoint:
|
||||
pass
|
||||
|
||||
|
||||
async def cancel_shielded_checkpoint():
|
||||
"""Introduce a schedule point, but not a cancel point.
|
||||
|
||||
This is *not* a :ref:`checkpoint <checkpoints>`, but it is half of a
|
||||
checkpoint, and when combined with :func:`checkpoint_if_cancelled` it can
|
||||
make a full checkpoint.
|
||||
|
||||
Equivalent to (but potentially more efficient than)::
|
||||
|
||||
with trio.CancelScope(shield=True):
|
||||
await trio.lowlevel.checkpoint()
|
||||
|
||||
"""
|
||||
return (await _async_yield(CancelShieldedCheckpoint)).unwrap()
|
||||
|
||||
|
||||
# Return values for abort functions
|
||||
class Abort(enum.Enum):
|
||||
""":class:`enum.Enum` used as the return value from abort functions.
|
||||
|
||||
See :func:`wait_task_rescheduled` for details.
|
||||
|
||||
.. data:: SUCCEEDED
|
||||
FAILED
|
||||
|
||||
"""
|
||||
|
||||
SUCCEEDED = 1
|
||||
FAILED = 2
|
||||
|
||||
|
||||
# Not exported in the trio._core namespace, but imported directly by _run.
|
||||
@attr.s(frozen=True)
|
||||
class WaitTaskRescheduled:
|
||||
abort_func = attr.ib()
|
||||
|
||||
|
||||
async def wait_task_rescheduled(abort_func):
|
||||
"""Put the current task to sleep, with cancellation support.
|
||||
|
||||
This is the lowest-level API for blocking in Trio. Every time a
|
||||
:class:`~trio.lowlevel.Task` blocks, it does so by calling this function
|
||||
(usually indirectly via some higher-level API).
|
||||
|
||||
This is a tricky interface with no guard rails. If you can use
|
||||
:class:`ParkingLot` or the built-in I/O wait functions instead, then you
|
||||
should.
|
||||
|
||||
Generally the way it works is that before calling this function, you make
|
||||
arrangements for "someone" to call :func:`reschedule` on the current task
|
||||
at some later point.
|
||||
|
||||
Then you call :func:`wait_task_rescheduled`, passing in ``abort_func``, an
|
||||
"abort callback".
|
||||
|
||||
(Terminology: in Trio, "aborting" is the process of attempting to
|
||||
interrupt a blocked task to deliver a cancellation.)
|
||||
|
||||
There are two possibilities for what happens next:
|
||||
|
||||
1. "Someone" calls :func:`reschedule` on the current task, and
|
||||
:func:`wait_task_rescheduled` returns or raises whatever value or error
|
||||
was passed to :func:`reschedule`.
|
||||
|
||||
2. The call's context transitions to a cancelled state (e.g. due to a
|
||||
timeout expiring). When this happens, the ``abort_func`` is called. Its
|
||||
interface looks like::
|
||||
|
||||
def abort_func(raise_cancel):
|
||||
...
|
||||
return trio.lowlevel.Abort.SUCCEEDED # or FAILED
|
||||
|
||||
It should attempt to clean up any state associated with this call, and
|
||||
in particular, arrange that :func:`reschedule` will *not* be called
|
||||
later. If (and only if!) it is successful, then it should return
|
||||
:data:`Abort.SUCCEEDED`, in which case the task will automatically be
|
||||
rescheduled with an appropriate :exc:`~trio.Cancelled` error.
|
||||
|
||||
Otherwise, it should return :data:`Abort.FAILED`. This means that the
|
||||
task can't be cancelled at this time, and still has to make sure that
|
||||
"someone" eventually calls :func:`reschedule`.
|
||||
|
||||
At that point there are again two possibilities. You can simply ignore
|
||||
the cancellation altogether: wait for the operation to complete and
|
||||
then reschedule and continue as normal. (For example, this is what
|
||||
:func:`trio.to_thread.run_sync` does if cancellation is disabled.)
|
||||
The other possibility is that the ``abort_func`` does succeed in
|
||||
cancelling the operation, but for some reason isn't able to report that
|
||||
right away. (Example: on Windows, it's possible to request that an
|
||||
async ("overlapped") I/O operation be cancelled, but this request is
|
||||
*also* asynchronous – you don't find out until later whether the
|
||||
operation was actually cancelled or not.) To report a delayed
|
||||
cancellation, then you should reschedule the task yourself, and call
|
||||
the ``raise_cancel`` callback passed to ``abort_func`` to raise a
|
||||
:exc:`~trio.Cancelled` (or possibly :exc:`KeyboardInterrupt`) exception
|
||||
into this task. Either of the approaches sketched below can work::
|
||||
|
||||
# Option 1:
|
||||
# Catch the exception from raise_cancel and inject it into the task.
|
||||
# (This is what Trio does automatically for you if you return
|
||||
# Abort.SUCCEEDED.)
|
||||
trio.lowlevel.reschedule(task, outcome.capture(raise_cancel))
|
||||
|
||||
# Option 2:
|
||||
# wait to be woken by "someone", and then decide whether to raise
|
||||
# the error from inside the task.
|
||||
outer_raise_cancel = None
|
||||
def abort(inner_raise_cancel):
|
||||
nonlocal outer_raise_cancel
|
||||
outer_raise_cancel = inner_raise_cancel
|
||||
TRY_TO_CANCEL_OPERATION()
|
||||
return trio.lowlevel.Abort.FAILED
|
||||
await wait_task_rescheduled(abort)
|
||||
if OPERATION_WAS_SUCCESSFULLY_CANCELLED:
|
||||
# raises the error
|
||||
outer_raise_cancel()
|
||||
|
||||
In any case it's guaranteed that we only call the ``abort_func`` at most
|
||||
once per call to :func:`wait_task_rescheduled`.
|
||||
|
||||
Sometimes, it's useful to be able to share some mutable sleep-related data
|
||||
between the sleeping task, the abort function, and the waking task. You
|
||||
can use the sleeping task's :data:`~Task.custom_sleep_data` attribute to
|
||||
store this data, and Trio won't touch it, except to make sure that it gets
|
||||
cleared when the task is rescheduled.
|
||||
|
||||
.. warning::
|
||||
|
||||
If your ``abort_func`` raises an error, or returns any value other than
|
||||
:data:`Abort.SUCCEEDED` or :data:`Abort.FAILED`, then Trio will crash
|
||||
violently. Be careful! Similarly, it is entirely possible to deadlock a
|
||||
Trio program by failing to reschedule a blocked task, or cause havoc by
|
||||
calling :func:`reschedule` too many times. Remember what we said up
|
||||
above about how you should use a higher-level API if at all possible?
|
||||
|
||||
"""
|
||||
return (await _async_yield(WaitTaskRescheduled(abort_func))).unwrap()
|
||||
|
||||
|
||||
# Not exported in the trio._core namespace, but imported directly by _run.
|
||||
@attr.s(frozen=True)
|
||||
class PermanentlyDetachCoroutineObject:
|
||||
final_outcome = attr.ib()
|
||||
|
||||
|
||||
async def permanently_detach_coroutine_object(final_outcome):
|
||||
"""Permanently detach the current task from the Trio scheduler.
|
||||
|
||||
Normally, a Trio task doesn't exit until its coroutine object exits. When
|
||||
you call this function, Trio acts like the coroutine object just exited
|
||||
and the task terminates with the given outcome. This is useful if you want
|
||||
to permanently switch the coroutine object over to a different coroutine
|
||||
runner.
|
||||
|
||||
When the calling coroutine enters this function it's running under Trio,
|
||||
and when the function returns it's running under the foreign coroutine
|
||||
runner.
|
||||
|
||||
You should make sure that the coroutine object has released any
|
||||
Trio-specific resources it has acquired (e.g. nurseries).
|
||||
|
||||
Args:
|
||||
final_outcome (outcome.Outcome): Trio acts as if the current task exited
|
||||
with the given return value or exception.
|
||||
|
||||
Returns or raises whatever value or exception the new coroutine runner
|
||||
uses to resume the coroutine.
|
||||
|
||||
"""
|
||||
if _run.current_task().child_nurseries:
|
||||
raise RuntimeError(
|
||||
"can't permanently detach a coroutine object with open nurseries"
|
||||
)
|
||||
return await _async_yield(PermanentlyDetachCoroutineObject(final_outcome))
|
||||
|
||||
|
||||
async def temporarily_detach_coroutine_object(abort_func):
|
||||
"""Temporarily detach the current coroutine object from the Trio
|
||||
scheduler.
|
||||
|
||||
When the calling coroutine enters this function it's running under Trio,
|
||||
and when the function returns it's running under the foreign coroutine
|
||||
runner.
|
||||
|
||||
The Trio :class:`Task` will continue to exist, but will be suspended until
|
||||
you use :func:`reattach_detached_coroutine_object` to resume it. In the
|
||||
mean time, you can use another coroutine runner to schedule the coroutine
|
||||
object. In fact, you have to – the function doesn't return until the
|
||||
coroutine is advanced from outside.
|
||||
|
||||
Note that you'll need to save the current :class:`Task` object to later
|
||||
resume; you can retrieve it with :func:`current_task`. You can also use
|
||||
this :class:`Task` object to retrieve the coroutine object – see
|
||||
:data:`Task.coro`.
|
||||
|
||||
Args:
|
||||
abort_func: Same as for :func:`wait_task_rescheduled`, except that it
|
||||
must return :data:`Abort.FAILED`. (If it returned
|
||||
:data:`Abort.SUCCEEDED`, then Trio would attempt to reschedule the
|
||||
detached task directly without going through
|
||||
:func:`reattach_detached_coroutine_object`, which would be bad.)
|
||||
Your ``abort_func`` should still arrange for whatever the coroutine
|
||||
object is doing to be cancelled, and then reattach to Trio and call
|
||||
the ``raise_cancel`` callback, if possible.
|
||||
|
||||
Returns or raises whatever value or exception the new coroutine runner
|
||||
uses to resume the coroutine.
|
||||
|
||||
"""
|
||||
return await _async_yield(WaitTaskRescheduled(abort_func))
|
||||
|
||||
|
||||
async def reattach_detached_coroutine_object(task, yield_value):
|
||||
"""Reattach a coroutine object that was detached using
|
||||
:func:`temporarily_detach_coroutine_object`.
|
||||
|
||||
When the calling coroutine enters this function it's running under the
|
||||
foreign coroutine runner, and when the function returns it's running under
|
||||
Trio.
|
||||
|
||||
This must be called from inside the coroutine being resumed, and yields
|
||||
whatever value you pass in. (Presumably you'll pass a value that will
|
||||
cause the current coroutine runner to stop scheduling this task.) Then the
|
||||
coroutine is resumed by the Trio scheduler at the next opportunity.
|
||||
|
||||
Args:
|
||||
task (Task): The Trio task object that the current coroutine was
|
||||
detached from.
|
||||
yield_value (object): The object to yield to the current coroutine
|
||||
runner.
|
||||
|
||||
"""
|
||||
# This is a kind of crude check – in particular, it can fail if the
|
||||
# passed-in task is where the coroutine *runner* is running. But this is
|
||||
# an experts-only interface, and there's no easy way to do a more accurate
|
||||
# check, so I guess that's OK.
|
||||
if not task.coro.cr_running:
|
||||
raise RuntimeError("given task does not match calling coroutine")
|
||||
_run.reschedule(task, outcome.Value("reattaching"))
|
||||
value = await _async_yield(yield_value)
|
||||
assert value == outcome.Value("reattaching")
|
||||
@@ -0,0 +1,149 @@
|
||||
import attr
|
||||
|
||||
from .. import _core
|
||||
from .._deprecate import deprecated
|
||||
from .._util import Final
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
class _UnboundedQueueStats:
|
||||
qsize = attr.ib()
|
||||
tasks_waiting = attr.ib()
|
||||
|
||||
|
||||
class UnboundedQueue(metaclass=Final):
|
||||
"""An unbounded queue suitable for certain unusual forms of inter-task
|
||||
communication.
|
||||
|
||||
This class is designed for use as a queue in cases where the producer for
|
||||
some reason cannot be subjected to back-pressure, i.e., :meth:`put_nowait`
|
||||
has to always succeed. In order to prevent the queue backlog from actually
|
||||
growing without bound, the consumer API is modified to dequeue items in
|
||||
"batches". If a consumer task processes each batch without yielding, then
|
||||
this helps achieve (but does not guarantee) an effective bound on the
|
||||
queue's memory use, at the cost of potentially increasing system latencies
|
||||
in general. You should generally prefer to use a memory channel
|
||||
instead if you can.
|
||||
|
||||
Currently each batch completely empties the queue, but `this may change in
|
||||
the future <https://github.com/python-trio/trio/issues/51>`__.
|
||||
|
||||
A :class:`UnboundedQueue` object can be used as an asynchronous iterator,
|
||||
where each iteration returns a new batch of items. I.e., these two loops
|
||||
are equivalent::
|
||||
|
||||
async for batch in queue:
|
||||
...
|
||||
|
||||
while True:
|
||||
obj = await queue.get_batch()
|
||||
...
|
||||
|
||||
"""
|
||||
|
||||
@deprecated(
|
||||
"0.9.0",
|
||||
issue=497,
|
||||
thing="trio.lowlevel.UnboundedQueue",
|
||||
instead="trio.open_memory_channel(math.inf)",
|
||||
)
|
||||
def __init__(self):
|
||||
self._lot = _core.ParkingLot()
|
||||
self._data = []
|
||||
# used to allow handoff from put to the first task in the lot
|
||||
self._can_get = False
|
||||
|
||||
def __repr__(self):
|
||||
return "<UnboundedQueue holding {} items>".format(len(self._data))
|
||||
|
||||
def qsize(self):
|
||||
"""Returns the number of items currently in the queue."""
|
||||
return len(self._data)
|
||||
|
||||
def empty(self):
|
||||
"""Returns True if the queue is empty, False otherwise.
|
||||
|
||||
There is some subtlety to interpreting this method's return value: see
|
||||
`issue #63 <https://github.com/python-trio/trio/issues/63>`__.
|
||||
|
||||
"""
|
||||
return not self._data
|
||||
|
||||
@_core.enable_ki_protection
|
||||
def put_nowait(self, obj):
|
||||
"""Put an object into the queue, without blocking.
|
||||
|
||||
This always succeeds, because the queue is unbounded. We don't provide
|
||||
a blocking ``put`` method, because it would never need to block.
|
||||
|
||||
Args:
|
||||
obj (object): The object to enqueue.
|
||||
|
||||
"""
|
||||
if not self._data:
|
||||
assert not self._can_get
|
||||
if self._lot:
|
||||
self._lot.unpark(count=1)
|
||||
else:
|
||||
self._can_get = True
|
||||
self._data.append(obj)
|
||||
|
||||
def _get_batch_protected(self):
|
||||
data = self._data.copy()
|
||||
self._data.clear()
|
||||
self._can_get = False
|
||||
return data
|
||||
|
||||
def get_batch_nowait(self):
|
||||
"""Attempt to get the next batch from the queue, without blocking.
|
||||
|
||||
Returns:
|
||||
list: A list of dequeued items, in order. On a successful call this
|
||||
list is always non-empty; if it would be empty we raise
|
||||
:exc:`~trio.WouldBlock` instead.
|
||||
|
||||
Raises:
|
||||
~trio.WouldBlock: if the queue is empty.
|
||||
|
||||
"""
|
||||
if not self._can_get:
|
||||
raise _core.WouldBlock
|
||||
return self._get_batch_protected()
|
||||
|
||||
async def get_batch(self):
|
||||
"""Get the next batch from the queue, blocking as necessary.
|
||||
|
||||
Returns:
|
||||
list: A list of dequeued items, in order. This list is always
|
||||
non-empty.
|
||||
|
||||
"""
|
||||
await _core.checkpoint_if_cancelled()
|
||||
if not self._can_get:
|
||||
await self._lot.park()
|
||||
return self._get_batch_protected()
|
||||
else:
|
||||
try:
|
||||
return self._get_batch_protected()
|
||||
finally:
|
||||
await _core.cancel_shielded_checkpoint()
|
||||
|
||||
def statistics(self):
|
||||
"""Return an object containing debugging information.
|
||||
|
||||
Currently the following fields are defined:
|
||||
|
||||
* ``qsize``: The number of items currently in the queue.
|
||||
* ``tasks_waiting``: The number of tasks blocked on this queue's
|
||||
:meth:`get_batch` method.
|
||||
|
||||
"""
|
||||
return _UnboundedQueueStats(
|
||||
qsize=len(self._data), tasks_waiting=self._lot.statistics().tasks_waiting
|
||||
)
|
||||
|
||||
def __aiter__(self):
|
||||
return self
|
||||
|
||||
async def __anext__(self):
|
||||
return await self.get_batch()
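# Usage sketch (illustration only): every put_nowait() succeeds immediately,
# and a single batch drains everything queued so far.
#
#     queue = UnboundedQueue()        # note: deprecated in favor of
#                                     # trio.open_memory_channel(math.inf)
#     for i in range(3):
#         queue.put_nowait(i)
#     assert queue.get_batch_nowait() == [0, 1, 2]
#     # The queue is now empty, so another get_batch_nowait() would raise
#     # trio.WouldBlock.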
|
||||
@@ -0,0 +1,98 @@
|
||||
import socket
|
||||
import sys
|
||||
import signal
|
||||
import warnings
|
||||
|
||||
from .. import _core
|
||||
from .._util import is_main_thread
|
||||
|
||||
|
||||
def _has_warn_on_full_buffer():
|
||||
if sys.version_info < (3, 7):
|
||||
return False
|
||||
|
||||
if "__pypy__" not in sys.builtin_module_names:
|
||||
# CPython has warn_on_full_buffer. Don't need to inspect.
|
||||
# Also, CPython doesn't support inspecting built-in functions.
|
||||
return True
|
||||
|
||||
import inspect
|
||||
|
||||
args_spec = inspect.getfullargspec(signal.set_wakeup_fd)
|
||||
return "warn_on_full_buffer" in args_spec.kwonlyargs
|
||||
|
||||
|
||||
HAVE_WARN_ON_FULL_BUFFER = _has_warn_on_full_buffer()
|
||||
|
||||
|
||||
class WakeupSocketpair:
|
||||
def __init__(self):
|
||||
self.wakeup_sock, self.write_sock = socket.socketpair()
|
||||
self.wakeup_sock.setblocking(False)
|
||||
self.write_sock.setblocking(False)
|
||||
# This somewhat reduces the amount of memory wasted queueing up data
|
||||
# for wakeups. With these settings, maximum number of 1-byte sends
|
||||
# before getting BlockingIOError:
|
||||
# Linux 4.8: 6
|
||||
# macOS (darwin 15.5): 1
|
||||
# Windows 10: 525347
|
||||
# Windows you're weird. (And on Windows setting SNDBUF to 0 makes send
|
||||
# blocking, even on non-blocking sockets, so don't do that.)
|
||||
#
|
||||
# But, if we're on an old Python and can't control the signal module's
|
||||
# warn-on-full-buffer behavior, then we need to leave things alone, so
|
||||
# the signal module won't spam the console with spurious warnings.
|
||||
if HAVE_WARN_ON_FULL_BUFFER:
|
||||
self.wakeup_sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
|
||||
self.write_sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
|
||||
# On Windows this is a TCP socket so this might matter. On other
|
||||
# platforms this fails b/c AF_UNIX sockets aren't actually TCP.
|
||||
try:
|
||||
self.write_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
||||
except OSError:
|
||||
pass
|
||||
self.old_wakeup_fd = None
|
||||
|
||||
def wakeup_thread_and_signal_safe(self):
|
||||
try:
|
||||
self.write_sock.send(b"\x00")
|
||||
except BlockingIOError:
|
||||
pass
|
||||
|
||||
async def wait_woken(self):
|
||||
await _core.wait_readable(self.wakeup_sock)
|
||||
self.drain()
|
||||
|
||||
def drain(self):
|
||||
try:
|
||||
while True:
|
||||
self.wakeup_sock.recv(2 ** 16)
|
||||
except BlockingIOError:
|
||||
pass
|
||||
|
||||
def wakeup_on_signals(self):
|
||||
assert self.old_wakeup_fd is None
|
||||
if not is_main_thread():
|
||||
return
|
||||
fd = self.write_sock.fileno()
|
||||
if HAVE_WARN_ON_FULL_BUFFER:
|
||||
self.old_wakeup_fd = signal.set_wakeup_fd(fd, warn_on_full_buffer=False)
|
||||
else:
|
||||
self.old_wakeup_fd = signal.set_wakeup_fd(fd)
|
||||
if self.old_wakeup_fd != -1:
|
||||
warnings.warn(
|
||||
RuntimeWarning(
|
||||
"It looks like Trio's signal handling code might have "
|
||||
"collided with another library you're using. If you're "
|
||||
"running Trio in guest mode, then this might mean you "
|
||||
"should set host_uses_signal_set_wakeup_fd=True. "
|
||||
"Otherwise, file a bug on Trio and we'll help you figure "
|
||||
"out what's going on."
|
||||
)
|
||||
)
|
||||
|
||||
def close(self):
|
||||
self.wakeup_sock.close()
|
||||
self.write_sock.close()
|
||||
if self.old_wakeup_fd is not None:
|
||||
signal.set_wakeup_fd(self.old_wakeup_fd)
|
||||
323
asq-env/lib/python3.9/site-packages/trio/_core/_windows_cffi.py
Normal file
@@ -0,0 +1,323 @@
import cffi
import re
import enum

################################################################
# Functions and types
################################################################

LIB = """
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751(v=vs.85).aspx
typedef int BOOL;
typedef unsigned char BYTE;
typedef BYTE BOOLEAN;
typedef void* PVOID;
typedef PVOID HANDLE;
typedef unsigned long DWORD;
typedef unsigned long ULONG;
typedef unsigned int NTSTATUS;
typedef unsigned long u_long;
typedef ULONG *PULONG;
typedef const void *LPCVOID;
typedef void *LPVOID;
typedef const wchar_t *LPCWSTR;

typedef uintptr_t ULONG_PTR;
typedef uintptr_t UINT_PTR;

typedef UINT_PTR SOCKET;

typedef struct _OVERLAPPED {
    ULONG_PTR Internal;
    ULONG_PTR InternalHigh;
    union {
        struct {
            DWORD Offset;
            DWORD OffsetHigh;
        } DUMMYSTRUCTNAME;
        PVOID Pointer;
    } DUMMYUNIONNAME;

    HANDLE hEvent;
} OVERLAPPED, *LPOVERLAPPED;

typedef OVERLAPPED WSAOVERLAPPED;
typedef LPOVERLAPPED LPWSAOVERLAPPED;
typedef PVOID LPSECURITY_ATTRIBUTES;
typedef PVOID LPCSTR;

typedef struct _OVERLAPPED_ENTRY {
    ULONG_PTR lpCompletionKey;
    LPOVERLAPPED lpOverlapped;
    ULONG_PTR Internal;
    DWORD dwNumberOfBytesTransferred;
} OVERLAPPED_ENTRY, *LPOVERLAPPED_ENTRY;

// kernel32.dll
HANDLE WINAPI CreateIoCompletionPort(
    _In_ HANDLE FileHandle,
    _In_opt_ HANDLE ExistingCompletionPort,
    _In_ ULONG_PTR CompletionKey,
    _In_ DWORD NumberOfConcurrentThreads
);

BOOL SetFileCompletionNotificationModes(
    HANDLE FileHandle,
    UCHAR Flags
);

HANDLE CreateFileW(
    LPCWSTR lpFileName,
    DWORD dwDesiredAccess,
    DWORD dwShareMode,
    LPSECURITY_ATTRIBUTES lpSecurityAttributes,
    DWORD dwCreationDisposition,
    DWORD dwFlagsAndAttributes,
    HANDLE hTemplateFile
);

BOOL WINAPI CloseHandle(
    _In_ HANDLE hObject
);

BOOL WINAPI PostQueuedCompletionStatus(
    _In_ HANDLE CompletionPort,
    _In_ DWORD dwNumberOfBytesTransferred,
    _In_ ULONG_PTR dwCompletionKey,
    _In_opt_ LPOVERLAPPED lpOverlapped
);

BOOL WINAPI GetQueuedCompletionStatusEx(
    _In_ HANDLE CompletionPort,
    _Out_ LPOVERLAPPED_ENTRY lpCompletionPortEntries,
    _In_ ULONG ulCount,
    _Out_ PULONG ulNumEntriesRemoved,
    _In_ DWORD dwMilliseconds,
    _In_ BOOL fAlertable
);

BOOL WINAPI CancelIoEx(
    _In_ HANDLE hFile,
    _In_opt_ LPOVERLAPPED lpOverlapped
);

BOOL WriteFile(
    HANDLE hFile,
    LPCVOID lpBuffer,
    DWORD nNumberOfBytesToWrite,
    LPDWORD lpNumberOfBytesWritten,
    LPOVERLAPPED lpOverlapped
);

BOOL ReadFile(
    HANDLE hFile,
    LPVOID lpBuffer,
    DWORD nNumberOfBytesToRead,
    LPDWORD lpNumberOfBytesRead,
    LPOVERLAPPED lpOverlapped
);

BOOL WINAPI SetConsoleCtrlHandler(
    _In_opt_ void* HandlerRoutine,
    _In_ BOOL Add
);

HANDLE CreateEventA(
    LPSECURITY_ATTRIBUTES lpEventAttributes,
    BOOL bManualReset,
    BOOL bInitialState,
    LPCSTR lpName
);

BOOL SetEvent(
    HANDLE hEvent
);

BOOL ResetEvent(
    HANDLE hEvent
);

DWORD WaitForSingleObject(
    HANDLE hHandle,
    DWORD dwMilliseconds
);

DWORD WaitForMultipleObjects(
    DWORD nCount,
    HANDLE *lpHandles,
    BOOL bWaitAll,
    DWORD dwMilliseconds
);

ULONG RtlNtStatusToDosError(
    NTSTATUS Status
);

int WSAIoctl(
    SOCKET s,
    DWORD dwIoControlCode,
    LPVOID lpvInBuffer,
    DWORD cbInBuffer,
    LPVOID lpvOutBuffer,
    DWORD cbOutBuffer,
    LPDWORD lpcbBytesReturned,
    LPWSAOVERLAPPED lpOverlapped,
    // actually LPWSAOVERLAPPED_COMPLETION_ROUTINE
    void* lpCompletionRoutine
);

int WSAGetLastError();

BOOL DeviceIoControl(
    HANDLE hDevice,
    DWORD dwIoControlCode,
    LPVOID lpInBuffer,
    DWORD nInBufferSize,
    LPVOID lpOutBuffer,
    DWORD nOutBufferSize,
    LPDWORD lpBytesReturned,
    LPOVERLAPPED lpOverlapped
);

// From https://github.com/piscisaureus/wepoll/blob/master/src/afd.h
typedef struct _AFD_POLL_HANDLE_INFO {
    HANDLE Handle;
    ULONG Events;
    NTSTATUS Status;
} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO;

// This is really defined as a messy union to allow stuff like
// i.DUMMYSTRUCTNAME.LowPart, but we don't need those complications.
// Under all that it's just an int64.
typedef int64_t LARGE_INTEGER;

typedef struct _AFD_POLL_INFO {
    LARGE_INTEGER Timeout;
    ULONG NumberOfHandles;
    ULONG Exclusive;
    AFD_POLL_HANDLE_INFO Handles[1];
} AFD_POLL_INFO, *PAFD_POLL_INFO;

"""

# cribbed from pywincffi
# programmatically strips out those annotations MSDN likes, like _In_
REGEX_SAL_ANNOTATION = re.compile(
    r"\b(_In_|_Inout_|_Out_|_Outptr_|_Reserved_)(opt_)?\b"
)
LIB = REGEX_SAL_ANNOTATION.sub(" ", LIB)

# Other fixups:
# - get rid of FAR, cffi doesn't like it
LIB = re.sub(r"\bFAR\b", " ", LIB)
# - PASCAL is apparently an alias for __stdcall (on modern compilers - modern
#   being _MSC_VER >= 800)
LIB = re.sub(r"\bPASCAL\b", "__stdcall", LIB)
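For illustration (this snippet is not part of the vendored file), here is what those fixups do to one MSDN-style prototype before the string reaches ffi.cdef():

# Illustrative only: mirrors the fixups above on a single declaration.
import re

sal = re.compile(r"\b(_In_|_Inout_|_Out_|_Outptr_|_Reserved_)(opt_)?\b")
decl = "BOOL WINAPI CancelIoEx(_In_ HANDLE hFile, _In_opt_ LPOVERLAPPED FAR lpOverlapped);"
decl = sal.sub(" ", decl)                         # drop SAL annotations
decl = re.sub(r"\bFAR\b", " ", decl)              # drop FAR
decl = re.sub(r"\bPASCAL\b", "__stdcall", decl)   # PASCAL -> __stdcall
print(decl)
# Prints the prototype with _In_/_In_opt_/FAR replaced by spaces, which is
# exactly the form cffi's cdef() is happy to parse.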

ffi = cffi.FFI()
ffi.cdef(LIB)

kernel32 = ffi.dlopen("kernel32.dll")
ntdll = ffi.dlopen("ntdll.dll")
ws2_32 = ffi.dlopen("ws2_32.dll")

################################################################
# Magic numbers
################################################################

# Here's a great resource for looking these up:
# https://www.magnumdb.com
# (Tip: check the box to see "Hex value")

INVALID_HANDLE_VALUE = ffi.cast("HANDLE", -1)


class ErrorCodes(enum.IntEnum):
    STATUS_TIMEOUT = 0x102
    WAIT_TIMEOUT = 0x102
    WAIT_ABANDONED = 0x80
    WAIT_OBJECT_0 = 0x00  # object is signaled
    WAIT_FAILED = 0xFFFFFFFF
    ERROR_IO_PENDING = 997
    ERROR_OPERATION_ABORTED = 995
    ERROR_ABANDONED_WAIT_0 = 735
    ERROR_INVALID_HANDLE = 6
    ERROR_INVALID_PARMETER = 87
    ERROR_NOT_FOUND = 1168
    ERROR_NOT_SOCKET = 10038


class FileFlags(enum.IntEnum):
    GENERIC_READ = 0x80000000
    SYNCHRONIZE = 0x00100000
    FILE_FLAG_OVERLAPPED = 0x40000000
    FILE_SHARE_READ = 1
    FILE_SHARE_WRITE = 2
    FILE_SHARE_DELETE = 4
    CREATE_NEW = 1
    CREATE_ALWAYS = 2
    OPEN_EXISTING = 3
    OPEN_ALWAYS = 4
    TRUNCATE_EXISTING = 5


class AFDPollFlags(enum.IntFlag):
    # These are drawn from a combination of:
    #   https://github.com/piscisaureus/wepoll/blob/master/src/afd.h
    #   https://github.com/reactos/reactos/blob/master/sdk/include/reactos/drivers/afd/shared.h
    AFD_POLL_RECEIVE = 0x0001
    AFD_POLL_RECEIVE_EXPEDITED = 0x0002  # OOB/urgent data
    AFD_POLL_SEND = 0x0004
    AFD_POLL_DISCONNECT = 0x0008  # received EOF (FIN)
    AFD_POLL_ABORT = 0x0010  # received RST
    AFD_POLL_LOCAL_CLOSE = 0x0020  # local socket object closed
    AFD_POLL_CONNECT = 0x0040  # socket is successfully connected
    AFD_POLL_ACCEPT = 0x0080  # you can call accept on this socket
    AFD_POLL_CONNECT_FAIL = 0x0100  # connect() terminated unsuccessfully
    # See WSAEventSelect docs for more details on these four:
    AFD_POLL_QOS = 0x0200
    AFD_POLL_GROUP_QOS = 0x0400
    AFD_POLL_ROUTING_INTERFACE_CHANGE = 0x0800
    AFD_POLL_EVENT_ADDRESS_LIST_CHANGE = 0x1000


class WSAIoctls(enum.IntEnum):
    SIO_BASE_HANDLE = 0x48000022
    SIO_BSP_HANDLE_SELECT = 0x4800001C
    SIO_BSP_HANDLE_POLL = 0x4800001D


class CompletionModes(enum.IntFlag):
    FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 0x1
    FILE_SKIP_SET_EVENT_ON_HANDLE = 0x2


class IoControlCodes(enum.IntEnum):
    IOCTL_AFD_POLL = 0x00012024
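As an illustration of how these definitions fit together (this sketch is not part of the vendored file and assumes the module is importable as trio._core._windows_cffi), the input buffer for IOCTL_AFD_POLL is an AFD_POLL_INFO built roughly as below. Actually submitting the ioctl, which Trio does through an overlapped DeviceIoControl on its I/O completion port, is deliberately left out.

# Sketch only: build the AFD_POLL_INFO input buffer used with IOCTL_AFD_POLL.
from trio._core._windows_cffi import AFDPollFlags, ffi


def make_afd_poll_info(base_handle, flags):
    # One handle, non-exclusive, "infinite" timeout (cancellation is expected
    # to come from CancelIoEx rather than from the timeout field).
    poll_info = ffi.new("AFD_POLL_INFO *")
    poll_info.Timeout = 2 ** 63 - 1  # max int64, effectively "forever"
    poll_info.NumberOfHandles = 1
    poll_info.Exclusive = 0
    poll_info.Handles[0].Handle = ffi.cast("HANDLE", base_handle)
    poll_info.Handles[0].Status = 0
    poll_info.Handles[0].Events = int(flags)
    return poll_info


# For example, a "readable or peer closed" event mask:
read_mask = (
    AFDPollFlags.AFD_POLL_RECEIVE
    | AFDPollFlags.AFD_POLL_DISCONNECT
    | AFDPollFlags.AFD_POLL_ABORT
    | AFDPollFlags.AFD_POLL_LOCAL_CLOSE
)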

################################################################
# Generic helpers
################################################################


def _handle(obj):
    # For now, represent handles as either cffi HANDLEs or as ints. If you
    # try to pass in a file descriptor instead, it's not going to work
    # out. (For that msvcrt.get_osfhandle does the trick, but I don't know if
    # we'll actually need that for anything...) For sockets this doesn't
    # matter, Python never allocates an fd. So let's wait until we actually
    # encounter the problem before worrying about it.
    if type(obj) is int:
        return ffi.cast("HANDLE", obj)
    else:
        return obj


def raise_winerror(winerror=None, *, filename=None, filename2=None):
    if winerror is None:
        winerror, msg = ffi.getwinerror()
    else:
        _, msg = ffi.getwinerror(winerror)
    # https://docs.python.org/3/library/exceptions.html#OSError
    raise OSError(0, msg, filename, winerror, filename2)
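These helpers combine with the dlopen'd libraries in the obvious way. Below is a Windows-only usage sketch (again not part of the vendored file; it assumes the module is importable as trio._core._windows_cffi): call a declared kernel32 function and convert a failure into an OSError via raise_winerror().

# Windows-only usage sketch (illustrative, not module code).
from trio._core._windows_cffi import ErrorCodes, ffi, kernel32, raise_winerror


def checked_event_roundtrip():
    # Manual-reset event, initially unsignaled, unnamed.
    event = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    if event == ffi.NULL:
        raise_winerror()
    try:
        if not kernel32.SetEvent(event):
            raise_winerror()
        # The event is now signaled, so this wait returns immediately.
        assert kernel32.WaitForSingleObject(event, 0) == ErrorCodes.WAIT_OBJECT_0
    finally:
        if not kernel32.CloseHandle(event):
            raise_winerror()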