asd
79
venv/lib/python3.12/site-packages/eventlet/__init__.py
Normal file
@@ -0,0 +1,79 @@
import os
import sys
import warnings


from eventlet import convenience
from eventlet import event
from eventlet import greenpool
from eventlet import greenthread
from eventlet import patcher
from eventlet import queue
from eventlet import semaphore
from eventlet import support
from eventlet import timeout
# NOTE(hberaud): Versions are now managed by hatch and control version.
# hatch has a build hook which generates the version file, however,
# if the project is installed in editable mode then the _version.py file
# will not be updated unless the package is reinstalled (or locally rebuilt).
# For further details, please read:
# https://github.com/ofek/hatch-vcs#build-hook
# https://github.com/maresb/hatch-vcs-footgun-example
try:
    from eventlet._version import __version__
except ImportError:
    __version__ = "0.0.0"
import greenlet

# Force monotonic library search as early as possible.
# Helpful when CPython < 3.5 on Linux blocked in `os.waitpid(-1)` before first use of hub.
# Example: gunicorn
# https://github.com/eventlet/eventlet/issues/401#issuecomment-327500352
try:
    import monotonic
    del monotonic
except ImportError:
    pass

connect = convenience.connect
listen = convenience.listen
serve = convenience.serve
StopServe = convenience.StopServe
wrap_ssl = convenience.wrap_ssl

Event = event.Event

GreenPool = greenpool.GreenPool
GreenPile = greenpool.GreenPile

sleep = greenthread.sleep
spawn = greenthread.spawn
spawn_n = greenthread.spawn_n
spawn_after = greenthread.spawn_after
kill = greenthread.kill

import_patched = patcher.import_patched
monkey_patch = patcher.monkey_patch

Queue = queue.Queue

Semaphore = semaphore.Semaphore
CappedSemaphore = semaphore.CappedSemaphore
BoundedSemaphore = semaphore.BoundedSemaphore

Timeout = timeout.Timeout
with_timeout = timeout.with_timeout
wrap_is_timeout = timeout.wrap_is_timeout
is_timeout = timeout.is_timeout

getcurrent = greenlet.greenlet.getcurrent

# deprecated
TimeoutError, exc_after, call_after_global = (
    support.wrap_deprecated(old, new)(fun) for old, new, fun in (
        ('TimeoutError', 'Timeout', Timeout),
        ('exc_after', 'greenthread.exc_after', greenthread.exc_after),
        ('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global),
    ))

os
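
The module above is mostly a re-export surface. A minimal sketch of how the re-exported names compose (assumes only that this vendored eventlet is importable; the values are illustrative):

    import eventlet

    def square(n):
        eventlet.sleep(0.01)          # greenthread.sleep: a cooperative yield, not a blocking sleep
        return n * n

    pool = eventlet.GreenPool(2)      # greenpool.GreenPool, re-exported above
    print(list(pool.imap(square, range(4))))   # [0, 1, 4, 9]

    with eventlet.Timeout(1, False):  # timeout.Timeout; False suppresses the timeout exception
        eventlet.sleep(0.1)
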
16
venv/lib/python3.12/site-packages/eventlet/_version.py
Normal file
@@ -0,0 +1,16 @@
# file generated by setuptools_scm
# don't change, don't track in version control
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple, Union
    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    VERSION_TUPLE = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

__version__ = version = '0.38.0'
__version_tuple__ = version_tuple = (0, 38, 0)
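
Since this file is generated by setuptools_scm at build time, the usual runtime check is the attribute re-exported by the package; a tiny sketch (reading the private _version module directly is shown only for illustration):

    import eventlet
    from eventlet import _version

    print(eventlet.__version__)      # '0.38.0' for this vendored copy; '0.0.0' if the file is absent
    print(_version.version_tuple)    # (0, 38, 0)
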
57
venv/lib/python3.12/site-packages/eventlet/asyncio.py
Normal file
@@ -0,0 +1,57 @@
"""
Asyncio compatibility functions.
"""
import asyncio

from greenlet import GreenletExit

from .greenthread import spawn, getcurrent
from .event import Event
from .hubs import get_hub
from .hubs.asyncio import Hub as AsyncioHub

__all__ = ["spawn_for_awaitable"]


def spawn_for_awaitable(coroutine):
    """
    Take a coroutine or some other object that can be awaited
    (``asyncio.Future``, ``asyncio.Task``), and turn it into a ``GreenThread``.

    Known limitations:

    * The coroutine/future/etc. don't run in their own
      greenlet/``GreenThread``.
    * As a result, things like ``eventlet.Lock``
      won't work correctly inside ``async`` functions, thread ids aren't
      meaningful, and so on.
    """
    if not isinstance(get_hub(), AsyncioHub):
        raise RuntimeError(
            "This API only works with eventlet's asyncio hub. "
            + "To use it, set an EVENTLET_HUB=asyncio environment variable."
        )

    def _run():
        # Convert the coroutine/Future/Task we're wrapping into a Future.
        future = asyncio.ensure_future(coroutine, loop=asyncio.get_running_loop())

        # Ensure killing the GreenThread cancels the Future:
        def _got_result(gthread):
            try:
                gthread.wait()
            except GreenletExit:
                future.cancel()

        getcurrent().link(_got_result)

        # Wait until the Future has a result.
        has_result = Event()
        future.add_done_callback(lambda _: has_result.send(True))
        has_result.wait()
        # Return the result of the Future (or raise an exception if it had an
        # exception).
        return future.result()

    # Start a GreenThread:
    return spawn(_run)
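
A sketch of driving a coroutine from greenthread code with spawn_for_awaitable (assumes EVENTLET_HUB=asyncio is set in the environment before eventlet is imported, as the RuntimeError above enforces):

    import asyncio
    from eventlet.asyncio import spawn_for_awaitable

    async def add(a, b):
        await asyncio.sleep(0.1)
        return a + b

    gt = spawn_for_awaitable(add(2, 3))   # a GreenThread, returned immediately
    print(gt.wait())                      # 5; gt.kill() would cancel the underlying Future
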
140
venv/lib/python3.12/site-packages/eventlet/backdoor.py
Normal file
@@ -0,0 +1,140 @@
from code import InteractiveConsole
import errno
import socket
import sys

import eventlet
from eventlet import hubs
from eventlet.support import greenlets, get_errno

try:
    sys.ps1
except AttributeError:
    sys.ps1 = '>>> '
try:
    sys.ps2
except AttributeError:
    sys.ps2 = '... '


class FileProxy:
    def __init__(self, f):
        self.f = f

    def isatty(self):
        return True

    def flush(self):
        pass

    def write(self, data, *a, **kw):
        try:
            self.f.write(data, *a, **kw)
            self.f.flush()
        except OSError as e:
            if get_errno(e) != errno.EPIPE:
                raise

    def readline(self, *a):
        return self.f.readline(*a).replace('\r\n', '\n')

    def __getattr__(self, attr):
        return getattr(self.f, attr)


# @@tavis: the `locals` args below mask the built-in function. Should
# be renamed.
class SocketConsole(greenlets.greenlet):
    def __init__(self, desc, hostport, locals):
        self.hostport = hostport
        self.locals = locals
        # mangle the socket
        self.desc = FileProxy(desc)
        greenlets.greenlet.__init__(self)

    def run(self):
        try:
            console = InteractiveConsole(self.locals)
            console.interact()
        finally:
            self.switch_out()
            self.finalize()

    def switch(self, *args, **kw):
        self.saved = sys.stdin, sys.stderr, sys.stdout
        sys.stdin = sys.stdout = sys.stderr = self.desc
        greenlets.greenlet.switch(self, *args, **kw)

    def switch_out(self):
        sys.stdin, sys.stderr, sys.stdout = self.saved

    def finalize(self):
        # restore the state of the socket
        self.desc = None
        if len(self.hostport) >= 2:
            host = self.hostport[0]
            port = self.hostport[1]
            print("backdoor closed to %s:%s" % (host, port,))
        else:
            print('backdoor closed')


def backdoor_server(sock, locals=None):
    """ Blocking function that runs a backdoor server on the socket *sock*,
    accepting connections and running backdoor consoles for each client that
    connects.

    The *locals* argument is a dictionary that will be included in the locals()
    of the interpreters. It can be convenient to stick important application
    variables in here.
    """
    listening_on = sock.getsockname()
    if sock.family == socket.AF_INET:
        # Expand result to IP + port
        listening_on = '%s:%s' % listening_on
    elif sock.family == socket.AF_INET6:
        ip, port, _, _ = listening_on
        listening_on = '%s:%s' % (ip, port,)
    # No action needed if sock.family == socket.AF_UNIX

    print("backdoor server listening on %s" % (listening_on,))
    try:
        while True:
            socketpair = None
            try:
                socketpair = sock.accept()
                backdoor(socketpair, locals)
            except OSError as e:
                # Broken pipe means it was shutdown
                if get_errno(e) != errno.EPIPE:
                    raise
            finally:
                if socketpair:
                    socketpair[0].close()
    finally:
        sock.close()


def backdoor(conn_info, locals=None):
    """Sets up an interactive console on a socket with a single connected
    client. This does not block the caller, as it spawns a new greenlet to
    handle the console. This is meant to be called from within an accept loop
    (such as backdoor_server).
    """
    conn, addr = conn_info
    if conn.family == socket.AF_INET:
        host, port = addr
        print("backdoor to %s:%s" % (host, port))
    elif conn.family == socket.AF_INET6:
        host, port, _, _ = addr
        print("backdoor to %s:%s" % (host, port))
    else:
        print('backdoor opened')
    fl = conn.makefile("rw")
    console = SocketConsole(fl, addr, locals)
    hub = hubs.get_hub()
    hub.schedule_call_global(0, console.switch)


if __name__ == '__main__':
    backdoor_server(eventlet.listen(('127.0.0.1', 9000)), {})
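
A sketch of embedding the backdoor in an application without blocking it (the port and the exposed state dict are arbitrary choices):

    import eventlet
    from eventlet import backdoor

    state = {'requests': 0}
    # Run the accept loop in its own greenthread; the locals dict exposes app objects to the REPL.
    eventlet.spawn(backdoor.backdoor_server,
                   eventlet.listen(('127.0.0.1', 3000)),
                   {'state': state})
    # ... application work continues here; then, from a shell:
    #   nc 127.0.0.1 3000
    #   >>> state['requests']
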
190
venv/lib/python3.12/site-packages/eventlet/convenience.py
Normal file
@@ -0,0 +1,190 @@
import sys
import warnings

from eventlet import greenpool
from eventlet import greenthread
from eventlet import support
from eventlet.green import socket
from eventlet.support import greenlets as greenlet


def connect(addr, family=socket.AF_INET, bind=None):
    """Convenience function for opening client sockets.

    :param addr: Address of the server to connect to. For TCP sockets, this is a (host, port) tuple.
    :param family: Socket family, optional. See :mod:`socket` documentation for available families.
    :param bind: Local address to bind to, optional.
    :return: The connected green socket object.
    """
    sock = socket.socket(family, socket.SOCK_STREAM)
    if bind is not None:
        sock.bind(bind)
    sock.connect(addr)
    return sock


class ReuseRandomPortWarning(Warning):
    pass


class ReusePortUnavailableWarning(Warning):
    pass


def listen(addr, family=socket.AF_INET, backlog=50, reuse_addr=True, reuse_port=None):
    """Convenience function for opening server sockets. This
    socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.

    Sets SO_REUSEADDR on the socket to save on annoyance.

    :param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple.
    :param family: Socket family, optional. See :mod:`socket` documentation for available families.
    :param backlog:

        The maximum number of queued connections. Should be at least 1; the maximum
        value is system-dependent.

    :return: The listening green socket object.
    """
    sock = socket.socket(family, socket.SOCK_STREAM)
    if reuse_addr and sys.platform[:3] != 'win':
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    if family in (socket.AF_INET, socket.AF_INET6) and addr[1] == 0:
        if reuse_port:
            warnings.warn(
                '''listen on random port (0) with SO_REUSEPORT is dangerous.
Double check your intent.
Example problem: https://github.com/eventlet/eventlet/issues/411''',
                ReuseRandomPortWarning, stacklevel=3)
    elif reuse_port is None:
        reuse_port = True
    if reuse_port and hasattr(socket, 'SO_REUSEPORT'):
        # NOTE(zhengwei): linux kernel >= 3.9
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        # OSError is enough on Python 3+
        except OSError as ex:
            if support.get_errno(ex) in (22, 92):
                # A famous platform defines unsupported socket option.
                # https://github.com/eventlet/eventlet/issues/380
                # https://github.com/eventlet/eventlet/issues/418
                warnings.warn(
                    '''socket.SO_REUSEPORT is defined but not supported.
On Windows: known bug, wontfix.
On other systems: please comment in the issue linked below.
More information: https://github.com/eventlet/eventlet/issues/380''',
                    ReusePortUnavailableWarning, stacklevel=3)

    sock.bind(addr)
    sock.listen(backlog)
    return sock


class StopServe(Exception):
    """Exception class used for quitting :func:`~eventlet.serve` gracefully."""
    pass


def _stop_checker(t, server_gt, conn):
    try:
        try:
            t.wait()
        finally:
            conn.close()
    except greenlet.GreenletExit:
        pass
    except Exception:
        greenthread.kill(server_gt, *sys.exc_info())


def serve(sock, handle, concurrency=1000):
    """Runs a server on the supplied socket. Calls the function *handle* in a
    separate greenthread for every incoming client connection. *handle* takes
    two arguments: the client socket object, and the client address::

        def myhandle(client_sock, client_addr):
            print("client connected", client_addr)

        eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle)

    Returning from *handle* closes the client socket.

    :func:`serve` blocks the calling greenthread; it won't return until
    the server completes. If you desire an immediate return,
    spawn a new greenthread for :func:`serve`.

    Any uncaught exceptions raised in *handle* are raised as exceptions
    from :func:`serve`, terminating the server, so be sure to be aware of the
    exceptions your application can raise. The return value of *handle* is
    ignored.

    Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the
    server -- that's the only way to get the server() function to return rather
    than raise.

    The value in *concurrency* controls the maximum number of
    greenthreads that will be open at any time handling requests. When
    the server hits the concurrency limit, it stops accepting new
    connections until the existing ones complete.
    """
    pool = greenpool.GreenPool(concurrency)
    server_gt = greenthread.getcurrent()

    while True:
        try:
            conn, addr = sock.accept()
            gt = pool.spawn(handle, conn, addr)
            gt.link(_stop_checker, server_gt, conn)
            conn, addr, gt = None, None, None
        except StopServe:
            return


def wrap_ssl(sock, *a, **kw):
    """Convenience function for converting a regular socket into an
    SSL socket. Has the same interface as :func:`ssl.wrap_socket`,
    but can also use PyOpenSSL. Though, note that it ignores the
    `cert_reqs`, `ssl_version`, `ca_certs`, `do_handshake_on_connect`,
    and `suppress_ragged_eofs` arguments when using PyOpenSSL.

    The preferred idiom is to call wrap_ssl directly on the creation
    method, e.g., ``wrap_ssl(connect(addr))`` or
    ``wrap_ssl(listen(addr), server_side=True)``. This way there is
    no "naked" socket sitting around to accidentally corrupt the SSL
    session.

    :return Green SSL object.
    """
    return wrap_ssl_impl(sock, *a, **kw)


try:
    from eventlet.green import ssl
    wrap_ssl_impl = ssl.wrap_socket
except ImportError:
    # trying PyOpenSSL
    try:
        from eventlet.green.OpenSSL import SSL
    except ImportError:
        def wrap_ssl_impl(*a, **kw):
            raise ImportError(
                "To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.7 or later.")
    else:
        def wrap_ssl_impl(sock, keyfile=None, certfile=None, server_side=False,
                          cert_reqs=None, ssl_version=None, ca_certs=None,
                          do_handshake_on_connect=True,
                          suppress_ragged_eofs=True, ciphers=None):
            # theoretically the ssl_version could be respected in this line
            context = SSL.Context(SSL.SSLv23_METHOD)
            if certfile is not None:
                context.use_certificate_file(certfile)
            if keyfile is not None:
                context.use_privatekey_file(keyfile)
            context.set_verify(SSL.VERIFY_NONE, lambda *x: True)

            connection = SSL.Connection(context, sock)
            if server_side:
                connection.set_accept_state()
            else:
                connection.set_connect_state()
            return connection
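
The listen/connect/serve trio above composes into a complete echo server; a sketch (port 0 asks the OS for a free port):

    import eventlet

    def echo(client_sock, client_addr):
        client_sock.sendall(client_sock.recv(1024))   # returning closes the socket

    server = eventlet.listen(('127.0.0.1', 0))
    port = server.getsockname()[1]
    eventlet.spawn(eventlet.serve, server, echo)

    client = eventlet.connect(('127.0.0.1', port))
    client.sendall(b'hello')
    print(client.recv(1024))                          # b'hello'
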
53
venv/lib/python3.12/site-packages/eventlet/corolocal.py
Normal file
@@ -0,0 +1,53 @@
import weakref

from eventlet import greenthread

__all__ = ['get_ident', 'local']


def get_ident():
    """ Returns ``id()`` of current greenlet. Useful for debugging."""
    return id(greenthread.getcurrent())


# the entire purpose of this class is to store off the constructor
# arguments in a local variable without calling __init__ directly
class _localbase:
    __slots__ = '_local__args', '_local__greens'

    def __new__(cls, *args, **kw):
        self = object.__new__(cls)
        object.__setattr__(self, '_local__args', (args, kw))
        object.__setattr__(self, '_local__greens', weakref.WeakKeyDictionary())
        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")
        return self


def _patch(thrl):
    greens = object.__getattribute__(thrl, '_local__greens')
    # until we can store the localdict on greenlets themselves,
    # we store it in _local__greens on the local object
    cur = greenthread.getcurrent()
    if cur not in greens:
        # must be the first time we've seen this greenlet, call __init__
        greens[cur] = {}
        cls = type(thrl)
        if cls.__init__ is not object.__init__:
            args, kw = object.__getattribute__(thrl, '_local__args')
            thrl.__init__(*args, **kw)
    object.__setattr__(thrl, '__dict__', greens[cur])


class local(_localbase):
    def __getattribute__(self, attr):
        _patch(self)
        return object.__getattribute__(self, attr)

    def __setattr__(self, attr, value):
        _patch(self)
        return object.__setattr__(self, attr, value)

    def __delattr__(self, attr):
        _patch(self)
        return object.__delattr__(self, attr)
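
A sketch showing that each greenthread gets its own view of a ``local`` instance (_patch() swaps in a per-greenlet __dict__ on every attribute access):

    import eventlet
    from eventlet import corolocal

    ctx = corolocal.local()
    ctx.name = 'main'

    def worker(label):
        ctx.name = label              # visible only inside this greenthread
        eventlet.sleep(0)             # yield to the other greenthreads
        print(corolocal.get_ident(), ctx.name)

    eventlet.spawn(worker, 'a').wait()
    eventlet.spawn(worker, 'b').wait()
    print(ctx.name)                   # still 'main'
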
59
venv/lib/python3.12/site-packages/eventlet/coros.py
Normal file
@@ -0,0 +1,59 @@
from eventlet import event as _event


class metaphore:
    """This is sort of an inverse semaphore: a counter that starts at 0 and
    waits only if nonzero. It's used to implement a "wait for all" scenario.

    >>> from eventlet import coros, spawn_n
    >>> count = coros.metaphore()
    >>> count.wait()
    >>> def decrementer(count, id):
    ...     print("{0} decrementing".format(id))
    ...     count.dec()
    ...
    >>> _ = spawn_n(decrementer, count, 'A')
    >>> _ = spawn_n(decrementer, count, 'B')
    >>> count.inc(2)
    >>> count.wait()
    A decrementing
    B decrementing
    """

    def __init__(self):
        self.counter = 0
        self.event = _event.Event()
        # send() right away, else we'd wait on the default 0 count!
        self.event.send()

    def inc(self, by=1):
        """Increment our counter. If this transitions the counter from zero to
        nonzero, make any subsequent :meth:`wait` call wait.
        """
        assert by > 0
        self.counter += by
        if self.counter == by:
            # If we just incremented self.counter by 'by', and the new count
            # equals 'by', then the old value of self.counter was 0.
            # Transitioning from 0 to a nonzero value means wait() must
            # actually wait.
            self.event.reset()

    def dec(self, by=1):
        """Decrement our counter. If this transitions the counter from nonzero
        to zero, a current or subsequent wait() call need no longer wait.
        """
        assert by > 0
        self.counter -= by
        if self.counter <= 0:
            # Don't leave self.counter < 0, that will screw things up in
            # future calls.
            self.counter = 0
            # Transitioning from nonzero to 0 means wait() need no longer wait.
            self.event.send()

    def wait(self):
        """Suspend the caller only if our count is nonzero. In that case,
        resume the caller once the count decrements to zero again.
        """
        self.event.wait()
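
The doctest above calls inc() after spawning, which is safe only because spawn_n does not run the child until the caller yields; a sketch of the more defensive inc-before-spawn ordering:

    from eventlet import coros, spawn_n

    pending = coros.metaphore()

    def task(count, n):
        try:
            print('task', n)
        finally:
            count.dec()               # always balance the inc()

    for n in range(3):
        pending.inc()                 # count the task before it can possibly finish
        spawn_n(task, pending, n)
    pending.wait()                    # resumes once all three dec() calls land
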
601
venv/lib/python3.12/site-packages/eventlet/dagpool.py
Normal file
@@ -0,0 +1,601 @@
# @file dagpool.py
# @author Nat Goodspeed
# @date 2016-08-08
# @brief Provide DAGPool class

from eventlet.event import Event
from eventlet import greenthread
import collections


# value distinguished from any other Python value including None
_MISSING = object()


class Collision(Exception):
    """
    DAGPool raises Collision when you try to launch two greenthreads with the
    same key, or post() a result for a key corresponding to a greenthread, or
    post() twice for the same key. As with KeyError, str(collision) names the
    key in question.
    """
    pass


class PropagateError(Exception):
    """
    When a DAGPool greenthread terminates with an exception instead of
    returning a result, attempting to retrieve its value raises
    PropagateError.

    Attributes:

    key
        the key of the greenthread which raised the exception

    exc
        the exception object raised by the greenthread
    """
    def __init__(self, key, exc):
        # initialize base class with a reasonable string message
        msg = "PropagateError({}): {}: {}" \
            .format(key, exc.__class__.__name__, exc)
        super().__init__(msg)
        self.msg = msg
        # Unless we set args, this is unpickleable:
        # https://bugs.python.org/issue1692335
        self.args = (key, exc)
        self.key = key
        self.exc = exc

    def __str__(self):
        return self.msg


class DAGPool:
    """
    A DAGPool is a pool that constrains greenthreads, not by max concurrency,
    but by data dependencies.

    This is a way to implement general DAG dependencies. A simple dependency
    tree (flowing in either direction) can straightforwardly be implemented
    using recursion and (e.g.)
    :meth:`GreenThread.imap() <eventlet.greenthread.GreenThread.imap>`.
    What gets complicated is when a given node depends on several other nodes
    as well as contributing to several other nodes.

    With DAGPool, you concurrently launch all applicable greenthreads; each
    will proceed as soon as it has all required inputs. The DAG is implicit in
    which items are required by each greenthread.

    Each greenthread is launched in a DAGPool with a key: any value that can
    serve as a Python dict key. The caller also specifies an iterable of other
    keys on which this greenthread depends. This iterable may be empty.

    The greenthread callable must accept (key, results), where:

    key
        is its own key

    results
        is an iterable of (key, value) pairs.

    A newly-launched DAGPool greenthread is entered immediately, and can
    perform any necessary setup work. At some point it will iterate over the
    (key, value) pairs from the passed 'results' iterable. Doing so blocks the
    greenthread until a value is available for each of the keys specified in
    its initial dependencies iterable. These (key, value) pairs are delivered
    in chronological order, *not* the order in which they are initially
    specified: each value will be delivered as soon as it becomes available.

    The value returned by a DAGPool greenthread becomes the value for its
    key, which unblocks any other greenthreads waiting on that key.

    If a DAGPool greenthread terminates with an exception instead of returning
    a value, attempting to retrieve the value raises :class:`PropagateError`,
    which binds the key of the original greenthread and the original
    exception. Unless the greenthread attempting to retrieve the value handles
    PropagateError, that exception will in turn be wrapped in a PropagateError
    of its own, and so forth. The code that ultimately handles PropagateError
    can follow the chain of PropagateError.exc attributes to discover the flow
    of that exception through the DAG of greenthreads.

    External greenthreads may also interact with a DAGPool. See :meth:`wait_each`,
    :meth:`waitall`, :meth:`post`.

    It is not recommended to constrain external DAGPool producer greenthreads
    in a :class:`GreenPool <eventlet.greenpool.GreenPool>`: it may be hard to
    provably avoid deadlock.

    .. automethod:: __init__
    .. automethod:: __getitem__
    """

    _Coro = collections.namedtuple("_Coro", ("greenthread", "pending"))

    def __init__(self, preload={}):
        """
        DAGPool can be prepopulated with an initial dict or iterable of (key,
        value) pairs. These (key, value) pairs are of course immediately
        available for any greenthread that depends on any of those keys.
        """
        try:
            # If a dict is passed, copy it. Don't risk a subsequent
            # modification to passed dict affecting our internal state.
            iteritems = preload.items()
        except AttributeError:
            # Not a dict, just an iterable of (key, value) pairs
            iteritems = preload

        # Load the initial dict
        self.values = dict(iteritems)

        # track greenthreads
        self.coros = {}

        # The key to blocking greenthreads is the Event.
        self.event = Event()

    def waitall(self):
        """
        waitall() blocks the calling greenthread until there is a value for
        every DAGPool greenthread launched by :meth:`spawn`. It returns a dict
        containing all :class:`preload data <DAGPool>`, all data from
        :meth:`post` and all values returned by spawned greenthreads.

        See also :meth:`wait`.
        """
        # waitall() is an alias for compatibility with GreenPool
        return self.wait()

    def wait(self, keys=_MISSING):
        """
        *keys* is an optional iterable of keys. If you omit the argument, it
        waits for all the keys from :class:`preload data <DAGPool>`, from
        :meth:`post` calls and from :meth:`spawn` calls: in other words, all
        the keys of which this DAGPool is aware.

        wait() blocks the calling greenthread until all of the relevant keys
        have values. wait() returns a dict whose keys are the relevant keys,
        and whose values come from the *preload* data, from values returned by
        DAGPool greenthreads or from :meth:`post` calls.

        If a DAGPool greenthread terminates with an exception, wait() will
        raise :class:`PropagateError` wrapping that exception. If more than
        one greenthread terminates with an exception, it is indeterminate
        which one wait() will raise.

        If an external greenthread posts a :class:`PropagateError` instance,
        wait() will raise that PropagateError. If more than one greenthread
        posts PropagateError, it is indeterminate which one wait() will raise.

        See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
        """
        # This is mostly redundant with wait_each() functionality.
        return dict(self.wait_each(keys))

    def wait_each(self, keys=_MISSING):
        """
        *keys* is an optional iterable of keys. If you omit the argument, it
        waits for all the keys from :class:`preload data <DAGPool>`, from
        :meth:`post` calls and from :meth:`spawn` calls: in other words, all
        the keys of which this DAGPool is aware.

        wait_each() is a generator producing (key, value) pairs as a value
        becomes available for each requested key. wait_each() blocks the
        calling greenthread until the next value becomes available. If the
        DAGPool was prepopulated with values for any of the relevant keys, of
        course those can be delivered immediately without waiting.

        Delivery order is intentionally decoupled from the initial sequence of
        keys: each value is delivered as soon as it becomes available. If
        multiple keys are available at the same time, wait_each() delivers
        each of the ready ones in arbitrary order before blocking again.

        The DAGPool does not distinguish between a value returned by one of
        its own greenthreads and one provided by a :meth:`post` call or *preload* data.

        The wait_each() generator terminates (raises StopIteration) when all
        specified keys have been delivered. Thus, typical usage might be:

        ::

            for key, value in dagpool.wait_each(keys):
                # process this ready key and value
            # continue processing now that we've gotten values for all keys

        By implication, if you pass wait_each() an empty iterable of keys, it
        returns immediately without yielding anything.

        If the value to be delivered is a :class:`PropagateError` exception object, the
        generator raises that PropagateError instead of yielding it.

        See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
        """
        # Build a local set() and then call _wait_each().
        return self._wait_each(self._get_keyset_for_wait_each(keys))

    def wait_each_success(self, keys=_MISSING):
        """
        wait_each_success() filters results so that only success values are
        yielded. In other words, unlike :meth:`wait_each`, wait_each_success()
        will not raise :class:`PropagateError`. Not every provided (or
        defaulted) key will necessarily be represented, though naturally the
        generator will not finish until all have completed.

        In all other respects, wait_each_success() behaves like :meth:`wait_each`.
        """
        for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
            if not isinstance(value, PropagateError):
                yield key, value

    def wait_each_exception(self, keys=_MISSING):
        """
        wait_each_exception() filters results so that only exceptions are
        yielded. Not every provided (or defaulted) key will necessarily be
        represented, though naturally the generator will not finish until
        all have completed.

        Unlike other DAGPool methods, wait_each_exception() simply yields
        :class:`PropagateError` instances as values rather than raising them.

        In all other respects, wait_each_exception() behaves like :meth:`wait_each`.
        """
        for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
            if isinstance(value, PropagateError):
                yield key, value

    def _get_keyset_for_wait_each(self, keys):
        """
        wait_each(), wait_each_success() and wait_each_exception() promise
        that if you pass an iterable of keys, the method will wait for results
        from those keys -- but if you omit the keys argument, the method will
        wait for results from all known keys. This helper implements that
        distinction, returning a set() of the relevant keys.
        """
        if keys is not _MISSING:
            return set(keys)
        else:
            # keys arg omitted -- use all the keys we know about
            return set(self.coros.keys()) | set(self.values.keys())

    def _wait_each(self, pending):
        """
        When _wait_each() encounters a value of PropagateError, it raises it.

        In all other respects, _wait_each() behaves like _wait_each_raw().
        """
        for key, value in self._wait_each_raw(pending):
            yield key, self._value_or_raise(value)

    @staticmethod
    def _value_or_raise(value):
        # Most methods attempting to deliver PropagateError should raise that
        # instead of simply returning it.
        if isinstance(value, PropagateError):
            raise value
        return value

    def _wait_each_raw(self, pending):
        """
        pending is a set() of keys for which we intend to wait. THIS SET WILL
        BE DESTRUCTIVELY MODIFIED: as each key acquires a value, that key will
        be removed from the passed 'pending' set.

        _wait_each_raw() does not treat a PropagateError instance specially:
        it will be yielded to the caller like any other value.

        In all other respects, _wait_each_raw() behaves like wait_each().
        """
        while True:
            # Before even waiting, show caller any (key, value) pairs that
            # are already available. Copy 'pending' because we want to be able
            # to remove items from the original set while iterating.
            for key in pending.copy():
                value = self.values.get(key, _MISSING)
                if value is not _MISSING:
                    # found one, it's no longer pending
                    pending.remove(key)
                    yield (key, value)

            if not pending:
                # Once we've yielded all the caller's keys, done.
                break

            # There are still more keys pending, so wait.
            self.event.wait()

    def spawn(self, key, depends, function, *args, **kwds):
        """
        Launch the passed *function(key, results, ...)* as a greenthread,
        passing it:

        - the specified *key*
        - an iterable of (key, value) pairs
        - whatever other positional args or keywords you specify.

        Iterating over the *results* iterable behaves like calling
        :meth:`wait_each(depends) <DAGPool.wait_each>`.

        Returning from *function()* behaves like
        :meth:`post(key, return_value) <DAGPool.post>`.

        If *function()* terminates with an exception, that exception is wrapped
        in :class:`PropagateError` with the greenthread's *key* and (effectively) posted
        as the value for that key. Attempting to retrieve that value will
        raise that PropagateError.

        Thus, if the greenthread with key 'a' terminates with an exception,
        and greenthread 'b' depends on 'a', when greenthread 'b' attempts to
        iterate through its *results* argument, it will encounter
        PropagateError. So by default, an uncaught exception will propagate
        through all the downstream dependencies.

        If you pass :meth:`spawn` a key already passed to spawn() or :meth:`post`, spawn()
        raises :class:`Collision`.
        """
        if key in self.coros or key in self.values:
            raise Collision(key)

        # The order is a bit tricky. First construct the set() of keys.
        pending = set(depends)
        # It's important that we pass to _wait_each() the same 'pending' set()
        # that we store in self.coros for this key. The generator-iterator
        # returned by _wait_each() becomes the function's 'results' iterable.
        newcoro = greenthread.spawn(self._wrapper, function, key,
                                    self._wait_each(pending),
                                    *args, **kwds)
        # Also capture the same (!) set in the new _Coro object for this key.
        # We must be able to observe ready keys being removed from the set.
        self.coros[key] = self._Coro(newcoro, pending)

    def _wrapper(self, function, key, results, *args, **kwds):
        """
        This wrapper runs the top-level function in a DAGPool greenthread,
        posting its return value (or PropagateError) to the DAGPool.
        """
        try:
            # call our passed function
            result = function(key, results, *args, **kwds)
        except Exception as err:
            # Wrap any exception it may raise in a PropagateError.
            result = PropagateError(key, err)
        finally:
            # function() has returned (or terminated with an exception). We no
            # longer need to track this greenthread in self.coros. Remove it
            # first so post() won't complain about a running greenthread.
            del self.coros[key]

        try:
            # as advertised, try to post() our return value
            self.post(key, result)
        except Collision:
            # if we've already post()ed a result, oh well
            pass

        # also, in case anyone cares...
        return result

    def spawn_many(self, depends, function, *args, **kwds):
        """
        spawn_many() accepts a single *function* whose parameters are the same
        as for :meth:`spawn`.

        The difference is that spawn_many() accepts a dependency dict
        *depends*. A new greenthread is spawned for each key in the dict. That
        dict key's value should be an iterable of other keys on which this
        greenthread depends.

        If the *depends* dict contains any key already passed to :meth:`spawn`
        or :meth:`post`, spawn_many() raises :class:`Collision`. It is
        indeterminate how many of the other keys in *depends* will have
        successfully spawned greenthreads.
        """
        # Iterate over 'depends' items, relying on self.spawn() not to
        # context-switch so no one can modify 'depends' along the way.
        for key, deps in depends.items():
            self.spawn(key, deps, function, *args, **kwds)

    def kill(self, key):
        """
        Kill the greenthread that was spawned with the specified *key*.

        If no such greenthread was spawned, raise KeyError.
        """
        # let KeyError, if any, propagate
        self.coros[key].greenthread.kill()
        # once killed, remove it
        del self.coros[key]

    def post(self, key, value, replace=False):
        """
        post(key, value) stores the passed *value* for the passed *key*. It
        then causes each greenthread blocked on its results iterable, or on
        :meth:`wait_each(keys) <DAGPool.wait_each>`, to check for new values.
        A waiting greenthread might not literally resume on every single
        post() of a relevant key, but the first post() of a relevant key
        ensures that it will resume eventually, and when it does it will catch
        up with all relevant post() calls.

        Calling post(key, value) when there is a running greenthread with that
        same *key* raises :class:`Collision`. If you must post(key, value) instead of
        letting the greenthread run to completion, you must first call
        :meth:`kill(key) <DAGPool.kill>`.

        The DAGPool implicitly post()s the return value from each of its
        greenthreads. But a greenthread may explicitly post() a value for its
        own key, which will cause its return value to be discarded.

        Calling post(key, value, replace=False) (the default *replace*) when a
        value for that key has already been posted, by any means, raises
        :class:`Collision`.

        Calling post(key, value, replace=True) when a value for that key has
        already been posted, by any means, replaces the previously-stored
        value. However, that may make it complicated to reason about the
        behavior of greenthreads waiting on that key.

        After a post(key, value1) followed by post(key, value2, replace=True),
        it is unspecified which pending :meth:`wait_each([key...]) <DAGPool.wait_each>`
        calls (or greenthreads iterating over *results* involving that key)
        will observe *value1* versus *value2*. It is guaranteed that
        subsequent wait_each([key...]) calls (or greenthreads spawned after
        that point) will observe *value2*.

        A successful call to
        post(key, :class:`PropagateError(key, ExceptionSubclass) <PropagateError>`)
        ensures that any subsequent attempt to retrieve that key's value will
        raise that PropagateError instance.
        """
        # First, check if we're trying to post() to a key with a running
        # greenthread.
        # A DAGPool greenthread is explicitly permitted to post() to its
        # OWN key.
        coro = self.coros.get(key, _MISSING)
        if coro is not _MISSING and coro.greenthread is not greenthread.getcurrent():
            # oh oh, trying to post a value for running greenthread from
            # some other greenthread
            raise Collision(key)

        # Here, either we're posting a value for a key with no greenthread or
        # we're posting from that greenthread itself.

        # Has somebody already post()ed a value for this key?
        # Unless replace == True, this is a problem.
        if key in self.values and not replace:
            raise Collision(key)

        # Either we've never before posted a value for this key, or we're
        # posting with replace == True.

        # update our database
        self.values[key] = value
        # and wake up pending waiters
        self.event.send()
        # The comment in Event.reset() says: "it's better to create a new
        # event rather than reset an old one". Okay, fine. We do want to be
        # able to support new waiters, so create a new Event.
        self.event = Event()

    def __getitem__(self, key):
        """
        __getitem__(key) (aka dagpool[key]) blocks until *key* has a value,
        then delivers that value.
        """
        # This is a degenerate case of wait_each(). Construct a tuple
        # containing only this 'key'. wait_each() will yield exactly one (key,
        # value) pair. Return just its value.
        for _, value in self.wait_each((key,)):
            return value

    def get(self, key, default=None):
        """
        get() returns the value for *key*. If *key* does not yet have a value,
        get() returns *default*.
        """
        return self._value_or_raise(self.values.get(key, default))

    def keys(self):
        """
        Return a snapshot tuple of keys for which we currently have values.
        """
        # Explicitly return a copy rather than an iterator: don't assume our
        # caller will finish iterating before new values are posted.
        return tuple(self.values.keys())

    def items(self):
        """
        Return a snapshot tuple of currently-available (key, value) pairs.
        """
        # Don't assume our caller will finish iterating before new values are
        # posted.
        return tuple((key, self._value_or_raise(value))
                     for key, value in self.values.items())

    def running(self):
        """
        Return number of running DAGPool greenthreads. This includes
        greenthreads blocked while iterating through their *results* iterable,
        that is, greenthreads waiting on values from other keys.
        """
        return len(self.coros)

    def running_keys(self):
        """
        Return keys for running DAGPool greenthreads. This includes
        greenthreads blocked while iterating through their *results* iterable,
        that is, greenthreads waiting on values from other keys.
        """
        # return snapshot; don't assume caller will finish iterating before we
        # next modify self.coros
        return tuple(self.coros.keys())

    def waiting(self):
        """
        Return number of waiting DAGPool greenthreads, that is, greenthreads
        still waiting on values from other keys. This explicitly does *not*
        include external greenthreads waiting on :meth:`wait`,
        :meth:`waitall`, :meth:`wait_each`.
        """
        # n.b. if Event would provide a count of its waiters, we could say
        # something about external greenthreads as well.
        # The logic to determine this count is exactly the same as the general
        # waiting_for() call.
        return len(self.waiting_for())

    # Use _MISSING instead of None as the default 'key' param so we can permit
    # None as a supported key.
    def waiting_for(self, key=_MISSING):
        """
        waiting_for(key) returns a set() of the keys for which the DAGPool
        greenthread spawned with that *key* is still waiting. If you pass a
        *key* for which no greenthread was spawned, waiting_for() raises
        KeyError.

        waiting_for() without argument returns a dict. Its keys are the keys
        of DAGPool greenthreads still waiting on one or more values. In the
        returned dict, the value of each such key is the set of other keys for
        which that greenthread is still waiting.

        This method allows diagnosing a "hung" DAGPool. If certain
        greenthreads are making no progress, it's possible that they are
        waiting on keys for which there is no greenthread and no :meth:`post` data.
        """
        # We may have greenthreads whose 'pending' entry indicates they're
        # waiting on some keys even though values have now been posted for
        # some or all of those keys, because those greenthreads have not yet
        # regained control since values were posted. So make a point of
        # excluding values that are now available.
        available = set(self.values.keys())

        if key is not _MISSING:
            # waiting_for(key) is semantically different than waiting_for().
            # It's just that they both seem to want the same method name.
            coro = self.coros.get(key, _MISSING)
            if coro is _MISSING:
                # Hmm, no running greenthread with this key. But was there
                # EVER a greenthread with this key? If not, let KeyError
                # propagate.
                self.values[key]
                # Oh good, there's a value for this key. Either the
                # greenthread finished, or somebody posted a value. Just say
                # the greenthread isn't waiting for anything.
                return set()
            else:
                # coro is the _Coro for the running greenthread with the
                # specified key.
                return coro.pending - available

        # This is a waiting_for() call, i.e. a general query rather than for a
        # specific key.

        # Start by iterating over (key, coro) pairs in self.coros. Generate
        # (key, pending) pairs in which 'pending' is the set of keys on which
        # the greenthread believes it's waiting, minus the set of keys that
        # are now available. Filter out any pair in which 'pending' is empty,
        # that is, that greenthread will be unblocked next time it resumes.
        # Make a dict from those pairs.
        return {key: pending
                for key, pending in ((key, (coro.pending - available))
                                     for key, coro in self.coros.items())
                if pending}
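
A sketch of a diamond-shaped graph (b and c each need a; d needs both) using spawn_many and waitall from above:

    from eventlet.dagpool import DAGPool

    def node(key, results):
        # Sums upstream values; iterating *results* blocks until each dependency is posted.
        return sum(value for _, value in results) + 1

    pool = DAGPool(preload={'a': 1})
    pool.spawn_many({'b': ('a',), 'c': ('a',), 'd': ('b', 'c')}, node)
    print(pool.waitall())    # {'a': 1, 'b': 2, 'c': 2, 'd': 5}
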
460
venv/lib/python3.12/site-packages/eventlet/db_pool.py
Normal file
@@ -0,0 +1,460 @@
from collections import deque
from contextlib import contextmanager
import sys
import time

from eventlet.pools import Pool
from eventlet import timeout
from eventlet import hubs
from eventlet.hubs.timer import Timer
from eventlet.greenthread import GreenThread


_MISSING = object()


class ConnectTimeout(Exception):
    pass


def cleanup_rollback(conn):
    conn.rollback()


class BaseConnectionPool(Pool):
    def __init__(self, db_module,
                 min_size=0, max_size=4,
                 max_idle=10, max_age=30,
                 connect_timeout=5,
                 cleanup=cleanup_rollback,
                 *args, **kwargs):
        """
        Constructs a pool with at least *min_size* connections and at most
        *max_size* connections. Uses *db_module* to construct new connections.

        The *max_idle* parameter determines how long pooled connections can
        remain idle, in seconds. After *max_idle* seconds have elapsed
        without the connection being used, the pool closes the connection.

        *max_age* is how long any particular connection is allowed to live.
        Connections that have been open for longer than *max_age* seconds are
        closed, regardless of idle time. If *max_age* is 0, all connections are
        closed on return to the pool, reducing it to a concurrency limiter.

        *connect_timeout* is the duration in seconds that the pool will wait
        before timing out on connect() to the database. If triggered, the
        timeout will raise a ConnectTimeout from get().

        The remainder of the arguments are used as parameters to the
        *db_module*'s connection constructor.
        """
        assert(db_module)
        self._db_module = db_module
        self._args = args
        self._kwargs = kwargs
        self.max_idle = max_idle
        self.max_age = max_age
        self.connect_timeout = connect_timeout
        self._expiration_timer = None
        self.cleanup = cleanup
        super().__init__(min_size=min_size, max_size=max_size, order_as_stack=True)

    def _schedule_expiration(self):
        """Sets up a timer that will call _expire_old_connections when the
        oldest connection currently in the free pool is ready to expire. This
        is the earliest possible time that a connection could expire, thus, the
        timer will be running as infrequently as possible without missing a
        possible expiration.

        If this function is called when a timer is already scheduled, it does
        nothing.

        If max_age or max_idle is 0, _schedule_expiration likewise does nothing.
        """
        if self.max_age == 0 or self.max_idle == 0:
            # expiration is unnecessary because all connections will be expired
            # on put
            return

        if (self._expiration_timer is not None
                and not getattr(self._expiration_timer, 'called', False)):
            # the next timer is already scheduled
            return

        try:
            now = time.time()
            self._expire_old_connections(now)
            # the last item in the list, because of the stack ordering,
            # is going to be the most-idle
            idle_delay = (self.free_items[-1][0] - now) + self.max_idle
            oldest = min([t[1] for t in self.free_items])
            age_delay = (oldest - now) + self.max_age

            next_delay = min(idle_delay, age_delay)
        except (IndexError, ValueError):
            # no free items, unschedule ourselves
            self._expiration_timer = None
            return

        if next_delay > 0:
            # set up a continuous self-calling loop
            self._expiration_timer = Timer(next_delay, GreenThread(hubs.get_hub().greenlet).switch,
                                           self._schedule_expiration, [], {})
            self._expiration_timer.schedule()

    def _expire_old_connections(self, now):
        """Iterates through the open connections contained in the pool, closing
        ones that have remained idle for longer than max_idle seconds, or have
        been in existence for longer than max_age seconds.

        *now* is the current time, as returned by time.time().
        """
        original_count = len(self.free_items)
        expired = [
            conn
            for last_used, created_at, conn in self.free_items
            if self._is_expired(now, last_used, created_at)]

        new_free = [
            (last_used, created_at, conn)
            for last_used, created_at, conn in self.free_items
            if not self._is_expired(now, last_used, created_at)]
        self.free_items.clear()
        self.free_items.extend(new_free)

        # adjust the current size counter to account for expired
        # connections
        self.current_size -= original_count - len(self.free_items)

        for conn in expired:
            self._safe_close(conn, quiet=True)

    def _is_expired(self, now, last_used, created_at):
        """Returns true and closes the connection if it's expired.
        """
        if (self.max_idle <= 0 or self.max_age <= 0
                or now - last_used > self.max_idle
                or now - created_at > self.max_age):
            return True
        return False

    def _unwrap_connection(self, conn):
        """If the connection was wrapped by a subclass of
        BaseConnectionWrapper and is still functional (as determined
        by the __nonzero__, or __bool__ in python3, method), returns
        the unwrapped connection. If anything goes wrong with this
        process, returns None.
        """
        base = None
        try:
            if conn:
                base = conn._base
                conn._destroy()
            else:
                base = None
        except AttributeError:
            pass
        return base

    def _safe_close(self, conn, quiet=False):
        """Closes the (already unwrapped) connection, squelching any
        exceptions.
        """
        try:
            conn.close()
        except AttributeError:
            pass  # conn is None, or junk
        except Exception:
            if not quiet:
                print("Connection.close raised: %s" % (sys.exc_info()[1]))

    def get(self):
        conn = super().get()

        # None is a flag value that means that put got called with
        # something it couldn't use
        if conn is None:
            try:
                conn = self.create()
            except Exception:
                # unconditionally increase the free pool because
                # even if there are waiters, doing a full put
                # would incur a greenlib switch and thus lose the
                # exception stack
                self.current_size -= 1
                raise

        # if the call to get() draws from the free pool, it will come
        # back as a tuple
        if isinstance(conn, tuple):
            _last_used, created_at, conn = conn
        else:
            created_at = time.time()

        # wrap the connection so the consumer can call close() safely
        wrapped = PooledConnectionWrapper(conn, self)
        # annotating the wrapper so that when it gets put in the pool
        # again, we'll know how old it is
        wrapped._db_pool_created_at = created_at
        return wrapped

    def put(self, conn, cleanup=_MISSING):
        created_at = getattr(conn, '_db_pool_created_at', 0)
        now = time.time()
        conn = self._unwrap_connection(conn)

        if self._is_expired(now, now, created_at):
            self._safe_close(conn, quiet=False)
            conn = None
        elif cleanup is not None:
            if cleanup is _MISSING:
                cleanup = self.cleanup
            # by default, call rollback in case the connection is in the middle
            # of a transaction. However, rollback has performance implications
            # so optionally do nothing or call something else like ping
            try:
                if conn:
                    cleanup(conn)
            except Exception as e:
                # we don't care what the exception was, we just know the
                # connection is dead
                print("WARNING: cleanup %s raised: %s" % (cleanup, e))
                conn = None
            except:
                conn = None
                raise

        if conn is not None:
            super().put((now, created_at, conn))
        else:
            # wake up any waiters with a flag value that indicates
            # they need to manufacture a connection
            if self.waiting() > 0:
                super().put(None)
            else:
                # no waiters -- just change the size
                self.current_size -= 1
        self._schedule_expiration()

    @contextmanager
    def item(self, cleanup=_MISSING):
        conn = self.get()
        try:
            yield conn
        finally:
            self.put(conn, cleanup=cleanup)

    def clear(self):
        """Close all connections that this pool still holds a reference to,
        and removes all references to them.
        """
        if self._expiration_timer:
            self._expiration_timer.cancel()
        free_items, self.free_items = self.free_items, deque()
        for item in free_items:
            # Free items created using min_size>0 are not tuples.
            conn = item[2] if isinstance(item, tuple) else item
            self._safe_close(conn, quiet=True)
            self.current_size -= 1

    def __del__(self):
        self.clear()


class TpooledConnectionPool(BaseConnectionPool):
    """A pool which gives out :class:`~eventlet.tpool.Proxy`-based database
    connections.
    """

    def create(self):
        now = time.time()
        return now, now, self.connect(
            self._db_module, self.connect_timeout, *self._args, **self._kwargs)

    @classmethod
    def connect(cls, db_module, connect_timeout, *args, **kw):
        t = timeout.Timeout(connect_timeout, ConnectTimeout())
        try:
            from eventlet import tpool
            conn = tpool.execute(db_module.connect, *args, **kw)
            return tpool.Proxy(conn, autowrap_names=('cursor',))
        finally:
            t.cancel()


class RawConnectionPool(BaseConnectionPool):
    """A pool which gives out plain database connections.
    """

    def create(self):
        now = time.time()
        return now, now, self.connect(
            self._db_module, self.connect_timeout, *self._args, **self._kwargs)

    @classmethod
    def connect(cls, db_module, connect_timeout, *args, **kw):
        t = timeout.Timeout(connect_timeout, ConnectTimeout())
        try:
            return db_module.connect(*args, **kw)
        finally:
            t.cancel()


# default connection pool is the tpool one
ConnectionPool = TpooledConnectionPool


class GenericConnectionWrapper:
    def __init__(self, baseconn):
        self._base = baseconn

    # Proxy all method calls to self._base
    # FIXME: remove repetition; options to consider:
    # * for name in (...):
    #       setattr(class, name, lambda self, *a, **kw: getattr(self._base, name)(*a, **kw))
    # * def __getattr__(self, name): if name in (...): return getattr(self._base, name)
    # * other?
    def __enter__(self):
        return self._base.__enter__()

    def __exit__(self, exc, value, tb):
        return self._base.__exit__(exc, value, tb)

    def __repr__(self):
        return self._base.__repr__()

    _proxy_funcs = (
        'affected_rows',
        'autocommit',
        'begin',
        'change_user',
        'character_set_name',
        'close',
        'commit',
        'cursor',
        'dump_debug_info',
        'errno',
        'error',
        'errorhandler',
        'get_server_info',
        'insert_id',
        'literal',
        'ping',
        'query',
        'rollback',
        'select_db',
        'server_capabilities',
        'set_character_set',
        'set_isolation_level',
        'set_server_option',
        'set_sql_mode',
        'show_warnings',
        'shutdown',
        'sqlstate',
        'stat',
        'store_result',
        'string_literal',
        'thread_id',
        'use_result',
        'warning_count',
    )


for _proxy_fun in GenericConnectionWrapper._proxy_funcs:
|
||||
# excess wrapper for early binding (closure by value)
|
||||
def _wrapper(_proxy_fun=_proxy_fun):
|
||||
def _proxy_method(self, *args, **kwargs):
|
||||
return getattr(self._base, _proxy_fun)(*args, **kwargs)
|
||||
_proxy_method.func_name = _proxy_fun
|
||||
_proxy_method.__name__ = _proxy_fun
|
||||
_proxy_method.__qualname__ = 'GenericConnectionWrapper.' + _proxy_fun
|
||||
return _proxy_method
|
||||
setattr(GenericConnectionWrapper, _proxy_fun, _wrapper(_proxy_fun))
|
||||
del GenericConnectionWrapper._proxy_funcs
|
||||
del _proxy_fun
|
||||
del _wrapper
|
||||
|
||||
|
||||
class PooledConnectionWrapper(GenericConnectionWrapper):
|
||||
"""A connection wrapper where:
|
||||
- the close method returns the connection to the pool instead of closing it directly
|
||||
- ``bool(conn)`` returns a reasonable value
|
||||
- returns itself to the pool if it gets garbage collected
|
||||
"""
|
||||
|
||||
def __init__(self, baseconn, pool):
|
||||
super().__init__(baseconn)
|
||||
self._pool = pool
|
||||
|
||||
def __nonzero__(self):
|
||||
return (hasattr(self, '_base') and bool(self._base))
|
||||
|
||||
__bool__ = __nonzero__
|
||||
|
||||
def _destroy(self):
|
||||
self._pool = None
|
||||
try:
|
||||
del self._base
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
"""Return the connection to the pool, and remove the
|
||||
reference to it so that you can't use it again through this
|
||||
wrapper object.
|
||||
"""
|
||||
if self and self._pool:
|
||||
self._pool.put(self)
|
||||
self._destroy()
|
||||
|
||||
def __del__(self):
|
||||
return # this causes some issues if __del__ is called in the
|
||||
# main coroutine, so for now this is disabled
|
||||
# self.close()
|
||||
|
||||
|
||||
class DatabaseConnector:
|
||||
"""
|
||||
This is an object which will maintain a collection of database
|
||||
connection pools on a per-host basis.
|
||||
"""
|
||||
|
||||
def __init__(self, module, credentials,
|
||||
conn_pool=None, *args, **kwargs):
|
||||
"""constructor
|
||||
*module*
|
||||
Database module to use.
|
||||
*credentials*
|
||||
Mapping of hostname to connect arguments (e.g. username and password)
|
||||
"""
|
||||
assert(module)
|
||||
self._conn_pool_class = conn_pool
|
||||
if self._conn_pool_class is None:
|
||||
self._conn_pool_class = ConnectionPool
|
||||
self._module = module
|
||||
self._args = args
|
||||
self._kwargs = kwargs
|
||||
# this is a map of hostname to username/password
|
||||
self._credentials = credentials
|
||||
self._databases = {}
|
||||
|
||||
def credentials_for(self, host):
|
||||
if host in self._credentials:
|
||||
return self._credentials[host]
|
||||
else:
|
||||
return self._credentials.get('default', None)
|
||||
|
||||
def get(self, host, dbname):
|
||||
"""Returns a ConnectionPool to the target host and schema.
|
||||
"""
|
||||
key = (host, dbname)
|
||||
if key not in self._databases:
|
||||
new_kwargs = self._kwargs.copy()
|
||||
new_kwargs['db'] = dbname
|
||||
new_kwargs['host'] = host
|
||||
new_kwargs.update(self.credentials_for(host))
|
||||
dbpool = self._conn_pool_class(
|
||||
self._module, *self._args, **new_kwargs)
|
||||
self._databases[key] = dbpool
|
||||
|
||||
return self._databases[key]
|
||||
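The pool above is normally consumed through item() or get()/close(). A minimal usage sketch (not part of the file): the DB module, host, and credentials below are placeholders, and any DB-API 2.0 module with a connect() function would do.

import psycopg2  # placeholder DB-API module
from eventlet.db_pool import ConnectionPool

pool = ConnectionPool(psycopg2, max_size=4,
                      host='localhost', user='app', password='secret', dbname='app')

# item() checks a wrapped connection out and always returns it to the pool
with pool.item() as conn:
    cur = conn.cursor()
    cur.execute('SELECT 1')
    print(cur.fetchone())

# close() on a checked-out wrapper also returns it instead of closing it
conn = pool.get()
conn.close()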
218
venv/lib/python3.12/site-packages/eventlet/debug.py
Normal file
@ -0,0 +1,218 @@
"""The debug module contains utilities and functions for better
debugging Eventlet-powered applications."""

import os
import sys
import linecache
import re
import inspect

__all__ = ['spew', 'unspew', 'format_hub_listeners', 'format_hub_timers',
           'hub_listener_stacks', 'hub_exceptions', 'tpool_exceptions',
           'hub_prevent_multiple_readers', 'hub_timer_stacks',
           'hub_blocking_detection', 'format_asyncio_info',
           'format_threads_info']

_token_splitter = re.compile(r'\W+')


class Spew:

    def __init__(self, trace_names=None, show_values=True):
        self.trace_names = trace_names
        self.show_values = show_values

    def __call__(self, frame, event, arg):
        if event == 'line':
            lineno = frame.f_lineno
            if '__file__' in frame.f_globals:
                filename = frame.f_globals['__file__']
                if (filename.endswith('.pyc') or
                        filename.endswith('.pyo')):
                    filename = filename[:-1]
                name = frame.f_globals['__name__']
                line = linecache.getline(filename, lineno)
            else:
                name = '[unknown]'
                try:
                    # getsourcelines returns (lines, starting_lineno), so
                    # index relative to where the source block starts
                    src_lines, first_lineno = inspect.getsourcelines(frame)
                    line = src_lines[lineno - first_lineno]
                except OSError:
                    line = 'Unknown code named [%s].  VM instruction #%d' % (
                        frame.f_code.co_name, frame.f_lasti)
            if self.trace_names is None or name in self.trace_names:
                print('%s:%s: %s' % (name, lineno, line.rstrip()))
                if not self.show_values:
                    return self
                details = []
                tokens = _token_splitter.split(line)
                for tok in tokens:
                    if tok in frame.f_globals:
                        details.append('%s=%r' % (tok, frame.f_globals[tok]))
                    if tok in frame.f_locals:
                        details.append('%s=%r' % (tok, frame.f_locals[tok]))
                if details:
                    print("\t%s" % ' '.join(details))
        return self


def spew(trace_names=None, show_values=False):
    """Install a trace hook which writes incredibly detailed logs
    about what code is being executed to stdout.
    """
    sys.settrace(Spew(trace_names, show_values))


def unspew():
    """Remove the trace hook installed by spew.
    """
    sys.settrace(None)


def format_hub_listeners():
    """ Returns a formatted string of the current listeners on the current
    hub.  This can be useful in determining what's going on in the event system,
    especially when used in conjunction with :func:`hub_listener_stacks`.
    """
    from eventlet import hubs
    hub = hubs.get_hub()
    result = ['READERS:']
    for l in hub.get_readers():
        result.append(repr(l))
    result.append('WRITERS:')
    for l in hub.get_writers():
        result.append(repr(l))
    return os.linesep.join(result)


def format_asyncio_info():
    """ Returns a formatted string of the asyncio info.
    This can be useful in determining what's going on in the asyncio event
    loop system, especially when used in conjunction with the asyncio hub.
    """
    import asyncio
    tasks = asyncio.all_tasks()
    result = ['TASKS:']
    result.append(repr(tasks))
    result.append(f'EVENTLOOP: {asyncio.events.get_event_loop()}')
    return os.linesep.join(result)


def format_threads_info():
    """ Returns a formatted string of the threads info.
    This can be useful in determining what's going on with created threads,
    especially when used in conjunction with greenlet
    """
    import threading
    threads = threading._active
    result = ['THREADS:']
    result.append(repr(threads))
    return os.linesep.join(result)


def format_hub_timers():
    """ Returns a formatted string of the current timers on the current
    hub.  This can be useful in determining what's going on in the event system,
    especially when used in conjunction with :func:`hub_timer_stacks`.
    """
    from eventlet import hubs
    hub = hubs.get_hub()
    result = ['TIMERS:']
    for l in hub.timers:
        result.append(repr(l))
    return os.linesep.join(result)


def hub_listener_stacks(state=False):
    """Toggles whether or not the hub records the stack when clients register
    listeners on file descriptors.  This can be useful when trying to figure
    out what the hub is up to at any given moment.  To inspect the stacks
    of the current listeners, call :func:`format_hub_listeners` at critical
    junctures in the application logic.
    """
    from eventlet import hubs
    hubs.get_hub().set_debug_listeners(state)


def hub_timer_stacks(state=False):
    """Toggles whether or not the hub records the stack when timers are set.
    To inspect the stacks of the current timers, call :func:`format_hub_timers`
    at critical junctures in the application logic.
    """
    from eventlet.hubs import timer
    timer._g_debug = state


def hub_prevent_multiple_readers(state=True):
    """Toggle prevention of multiple greenlets reading from a socket

    When multiple greenlets read from the same socket it is often hard
    to predict which greenlet will receive what data.  To achieve
    resource sharing consider using ``eventlet.pools.Pool`` instead.

    Note that this toggle is a debugging convenience; it is not a
    feature intended for use in production code.

    **If you really know what you are doing** you can set the state
    to ``False`` to stop the hub from protecting against this mistake;
    otherwise we strongly discourage touching it, and if you must,
    proceed with great care.

    Be aware that disabling this protection applies to your entire
    process, not only to the context where you may find it useful, so
    it can have significant unexpected side effects, including race
    conditions between your sockets and on your I/O in general.

    Note also that this debug convenience is not supported by the
    asyncio hub, which is the official path for migrating off of
    eventlet; relying on it will lock your migration path.
    """
    from eventlet.hubs import hub, get_hub
    from eventlet.hubs import asyncio
    if not state and isinstance(get_hub(), asyncio.Hub):
        raise RuntimeError("Multiple readers are not yet supported by asyncio hub")
    hub.g_prevent_multiple_readers = state


def hub_exceptions(state=True):
    """Toggles whether the hub prints exceptions that are raised from its
    timers.  This can be useful to see how greenthreads are terminating.
    """
    from eventlet import hubs
    hubs.get_hub().set_timer_exceptions(state)
    from eventlet import greenpool
    greenpool.DEBUG = state


def tpool_exceptions(state=False):
    """Toggles whether tpool itself prints exceptions that are raised from
    functions that are executed in it, in addition to raising them like
    it normally does."""
    from eventlet import tpool
    tpool.QUIET = not state


def hub_blocking_detection(state=False, resolution=1):
    """Toggles whether Eventlet makes an effort to detect blocking
    behavior in an application.

    It does this by telling the kernel to raise a SIGALRM after a
    short timeout, and clearing the timeout every time the hub
    greenlet is resumed.  Therefore, any code that runs for a long
    time without yielding to the hub will get interrupted by the
    blocking detector (don't use it in production!).

    The *resolution* argument governs how long the SIGALRM timeout
    waits in seconds.  The implementation uses :func:`signal.setitimer`
    and can be specified as a floating-point value.
    The shorter the resolution, the greater the chance of false
    positives.
    """
    from eventlet import hubs
    assert resolution > 0
    hubs.get_hub().debug_blocking = state
    hubs.get_hub().debug_blocking_resolution = resolution
    if not state:
        hubs.get_hub().block_detect_post()
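A sketch of how these toggles are typically combined while debugging (not part of the file; the printed output is illustrative):

import eventlet
from eventlet import debug

debug.hub_blocking_detection(True, resolution=0.5)  # SIGALRM if hub is starved >0.5s
debug.hub_exceptions(True)                          # print timer/GreenPool exceptions

eventlet.spawn(eventlet.sleep, 0.1)
eventlet.sleep(0.2)

print(debug.format_hub_timers())     # dump pending timers
print(debug.format_hub_listeners())  # dump registered FD listeners

debug.hub_blocking_detection(False)  # also clears the pending itimer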
218
venv/lib/python3.12/site-packages/eventlet/event.py
Normal file
@ -0,0 +1,218 @@
from eventlet import hubs
from eventlet.support import greenlets as greenlet

__all__ = ['Event']


class NOT_USED:
    def __repr__(self):
        return 'NOT_USED'


NOT_USED = NOT_USED()


class Event:
    """An abstraction where an arbitrary number of coroutines
    can wait for one event from another.

    Events are similar to a Queue that can only hold one item, but differ
    in two important ways:

    1. calling :meth:`send` never unschedules the current greenthread
    2. :meth:`send` can only be called once; create a new event to send again.

    They are good for communicating results between coroutines, and
    are the basis for how
    :meth:`GreenThread.wait() <eventlet.greenthread.GreenThread.wait>`
    is implemented.

    >>> from eventlet import event
    >>> import eventlet
    >>> evt = event.Event()
    >>> def baz(b):
    ...     evt.send(b + 1)
    ...
    >>> _ = eventlet.spawn_n(baz, 3)
    >>> evt.wait()
    4
    """
    _result = None
    _exc = None

    def __init__(self):
        self._waiters = set()
        self.reset()

    def __str__(self):
        params = (self.__class__.__name__, hex(id(self)),
                  self._result, self._exc, len(self._waiters))
        return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params

    def reset(self):
        # this is kind of a misfeature and doesn't work perfectly well,
        # it's better to create a new event rather than reset an old one
        # removing documentation so that we don't get new use cases for it
        assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
        self._result = NOT_USED
        self._exc = None

    def ready(self):
        """ Return true if the :meth:`wait` call will return immediately.
        Used to avoid waiting for things that might take a while to time out.
        For example, you can put a bunch of events into a list, and then visit
        them all repeatedly, calling :meth:`ready` until one returns ``True``,
        and then you can :meth:`wait` on that one."""
        return self._result is not NOT_USED

    def has_exception(self):
        return self._exc is not None

    def has_result(self):
        return self._result is not NOT_USED and self._exc is None

    def poll(self, notready=None):
        if self.ready():
            return self.wait()
        return notready

    # QQQ make it return tuple (type, value, tb) instead of raising
    # because
    # 1) "poll" does not imply raising
    # 2) it's better not to screw up caller's sys.exc_info() by default
    #    (e.g. if caller wants to call the function in except or finally)
    def poll_exception(self, notready=None):
        if self.has_exception():
            return self.wait()
        return notready

    def poll_result(self, notready=None):
        if self.has_result():
            return self.wait()
        return notready

    def wait(self, timeout=None):
        """Wait until another coroutine calls :meth:`send`.
        Returns the value the other coroutine passed to :meth:`send`.

        >>> import eventlet
        >>> evt = eventlet.Event()
        >>> def wait_on():
        ...    retval = evt.wait()
        ...    print("waited for {0}".format(retval))
        >>> _ = eventlet.spawn(wait_on)
        >>> evt.send('result')
        >>> eventlet.sleep(0)
        waited for result

        Returns immediately if the event has already occurred.

        >>> evt.wait()
        'result'

        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in
        seconds (or fractions thereof).
        """
        current = greenlet.getcurrent()
        if self._result is NOT_USED:
            hub = hubs.get_hub()
            self._waiters.add(current)
            timer = None
            if timeout is not None:
                timer = hub.schedule_call_local(timeout, self._do_send, None, None, current)
            try:
                result = hub.switch()
                if timer is not None:
                    timer.cancel()
                return result
            finally:
                self._waiters.discard(current)
        if self._exc is not None:
            current.throw(*self._exc)
        return self._result

    def send(self, result=None, exc=None):
        """Makes arrangements for the waiters to be woken with the
        result and then returns immediately to the parent.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def waiter():
        ...     print('about to wait')
        ...     result = evt.wait()
        ...     print('waited for {0}'.format(result))
        >>> _ = eventlet.spawn(waiter)
        >>> eventlet.sleep(0)
        about to wait
        >>> evt.send('a')
        >>> eventlet.sleep(0)
        waited for a

        It is an error to call :meth:`send` multiple times on the same event.

        >>> evt.send('whoops') # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        AssertionError: Trying to re-send() an already-triggered event.

        Use :meth:`reset` between :meth:`send` s to reuse an event object.
        """
        assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
        self._result = result
        if exc is not None and not isinstance(exc, tuple):
            exc = (exc, )
        self._exc = exc
        hub = hubs.get_hub()
        for waiter in self._waiters:
            hub.schedule_call_global(
                0, self._do_send, self._result, self._exc, waiter)

    def _do_send(self, result, exc, waiter):
        if waiter in self._waiters:
            if exc is None:
                waiter.switch(result)
            else:
                waiter.throw(*exc)

    def send_exception(self, *args):
        """Same as :meth:`send`, but sends an exception to waiters.

        The arguments to send_exception are the same as the arguments
        to ``raise``.  If a single exception object is passed in, it
        will be re-raised when :meth:`wait` is called, generating a
        new stacktrace.

           >>> from eventlet import event
           >>> evt = event.Event()
           >>> evt.send_exception(RuntimeError())
           >>> evt.wait()
           Traceback (most recent call last):
             File "<stdin>", line 1, in <module>
             File "eventlet/event.py", line 120, in wait
               current.throw(*self._exc)
           RuntimeError

        If it's important to preserve the entire original stack trace,
        you must pass in the entire :func:`sys.exc_info` tuple.

           >>> import sys
           >>> evt = event.Event()
           >>> try:
           ...     raise RuntimeError()
           ... except RuntimeError:
           ...     evt.send_exception(*sys.exc_info())
           ...
           >>> evt.wait()
           Traceback (most recent call last):
             File "<stdin>", line 1, in <module>
             File "eventlet/event.py", line 120, in wait
               current.throw(*self._exc)
             File "<stdin>", line 2, in <module>
           RuntimeError

        Note that doing so stores a traceback object directly on the
        Event object, which may cause reference cycles. See the
        :func:`sys.exc_info` documentation.
        """
        # the arguments are the same as for greenlet.throw
        return self.send(None, args)
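One behavior worth spelling out beyond the doctests: wait(timeout=...) wakes the waiter with None through the same _do_send path, so None doubles as the timed-out value. A small sketch (not part of the file):

import eventlet
from eventlet.event import Event

evt = Event()
eventlet.spawn_after(0.5, evt.send, 'payload')

print(evt.wait(timeout=0.1))  # None -- the local timer fired first
print(evt.wait())             # 'payload' -- blocks until send() runs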
@ -0,0 +1,15 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import SocketServer

patcher.inject(
    'http.server',
    globals(),
    ('socket', socket),
    ('SocketServer', SocketServer),
    ('socketserver', SocketServer))

del patcher

if __name__ == '__main__':
    test()
@ -0,0 +1,17 @@
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import SimpleHTTPServer
from eventlet.green import urllib
from eventlet.green import select

test = None  # bind prior to patcher.inject to silence pyflakes warning below
patcher.inject(
    'http.server',
    globals(),
    ('urllib', urllib),
    ('select', select))

del patcher

if __name__ == '__main__':
    test()  # pyflakes false alarm here unless test = None above
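Both hunks above lean on eventlet.patcher.inject, which re-executes a stdlib module with selected imports swapped for their green versions and copies the result into the supplied globals. The same machinery is exposed for standalone use as patcher.import_patched; a sketch (not part of the file):

from eventlet import patcher

# stdlib ftplib, re-imported so its socket usage is cooperative
ftplib = patcher.import_patched('ftplib')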
40
venv/lib/python3.12/site-packages/eventlet/green/MySQLdb.py
Normal file
@ -0,0 +1,40 @@
__MySQLdb = __import__('MySQLdb')

__all__ = __MySQLdb.__all__
__patched__ = ["connect", "Connect", 'Connection', 'connections']

from eventlet.patcher import slurp_properties
slurp_properties(
    __MySQLdb, globals(),
    ignore=__patched__, srckeys=dir(__MySQLdb))

from eventlet import tpool

__orig_connections = __import__('MySQLdb.connections').connections


def Connection(*args, **kw):
    conn = tpool.execute(__orig_connections.Connection, *args, **kw)
    return tpool.Proxy(conn, autowrap_names=('cursor',))


connect = Connect = Connection


# replicate the MySQLdb.connections module but with a tpooled Connection factory
class MySQLdbConnectionsModule:
    pass


connections = MySQLdbConnectionsModule()
for var in dir(__orig_connections):
    if not var.startswith('__'):
        setattr(connections, var, getattr(__orig_connections, var))
connections.Connection = Connection

cursors = __import__('MySQLdb.cursors').cursors
converters = __import__('MySQLdb.converters').converters

# TODO support instantiating cursors.FooCursor objects directly
# TODO though this is a low priority, it would be nice if we supported
# subclassing eventlet.green.MySQLdb.connections.Connection
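Callers use this module exactly like plain MySQLdb; the tpool proxying is invisible. A sketch (not part of the file), assuming MySQLdb (mysqlclient) is installed; the credentials are placeholders:

from eventlet.green import MySQLdb

conn = MySQLdb.connect(host='localhost', user='app', passwd='secret', db='app')
cur = conn.cursor()             # cursor() is autowrapped into a tpool.Proxy too
cur.execute('SELECT VERSION()')
print(cur.fetchone())
conn.close()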
125
venv/lib/python3.12/site-packages/eventlet/green/OpenSSL/SSL.py
Normal file
@ -0,0 +1,125 @@
from OpenSSL import SSL as orig_SSL
from OpenSSL.SSL import *
from eventlet.support import get_errno
from eventlet import greenio
from eventlet.hubs import trampoline
import socket


class GreenConnection(greenio.GreenSocket):
    """ Nonblocking wrapper for SSL.Connection objects.
    """

    def __init__(self, ctx, sock=None):
        if sock is not None:
            fd = orig_SSL.Connection(ctx, sock)
        else:
            # if we're given a Connection object directly, use it;
            # this is used in the inherited accept() method
            fd = ctx
        super(ConnectionType, self).__init__(fd)

    def do_handshake(self):
        """ Perform an SSL handshake (usually called after renegotiate or one of
        set_accept_state or set_connect_state). This can raise the same exceptions as
        send and recv. """
        if self.act_non_blocking:
            return self.fd.do_handshake()
        while True:
            try:
                return self.fd.do_handshake()
            except WantReadError:
                trampoline(self.fd.fileno(),
                           read=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(),
                           write=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)

    def dup(self):
        raise NotImplementedError("Dup not supported on SSL sockets")

    def makefile(self, mode='r', bufsize=-1):
        raise NotImplementedError("Makefile not supported on SSL sockets")

    def read(self, size):
        """Works like a blocking call to SSL_read(), whose behavior is
        described here:  http://www.openssl.org/docs/ssl/SSL_read.html"""
        if self.act_non_blocking:
            return self.fd.read(size)
        while True:
            try:
                return self.fd.read(size)
            except WantReadError:
                trampoline(self.fd.fileno(),
                           read=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(),
                           write=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except SysCallError as e:
                if get_errno(e) == -1 or get_errno(e) > 0:
                    return ''

    recv = read

    def write(self, data):
        """Works like a blocking call to SSL_write(), whose behavior is
        described here:  http://www.openssl.org/docs/ssl/SSL_write.html"""
        if not data:
            return 0  # calling SSL_write() with 0 bytes to be sent is undefined
        if self.act_non_blocking:
            return self.fd.write(data)
        while True:
            try:
                return self.fd.write(data)
            except WantReadError:
                trampoline(self.fd.fileno(),
                           read=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(),
                           write=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)

    send = write

    def sendall(self, data):
        """Send "all" data on the connection. This calls send() repeatedly until
        all data is sent. If an error occurs, it's impossible to tell how much data
        has been sent.

        No return value."""
        tail = self.send(data)
        while tail < len(data):
            tail += self.send(data[tail:])

    def shutdown(self):
        if self.act_non_blocking:
            return self.fd.shutdown()
        while True:
            try:
                return self.fd.shutdown()
            except WantReadError:
                trampoline(self.fd.fileno(),
                           read=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(),
                           write=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)


Connection = ConnectionType = GreenConnection

del greenio
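A rough client-side sketch (not part of the file), assuming pyOpenSSL is installed; the host is arbitrary and the context-method constant varies across pyOpenSSL versions:

import socket
from OpenSSL import SSL as orig_SSL
from eventlet.green.OpenSSL.SSL import GreenConnection

ctx = orig_SSL.Context(orig_SSL.TLS_CLIENT_METHOD)
sock = socket.create_connection(('example.com', 443))

conn = GreenConnection(ctx, sock)
conn.set_connect_state()  # plain Connection method, reached via GreenSocket delegation
conn.do_handshake()       # trampolines instead of blocking the hub
conn.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
print(conn.read(128))
conn.shutdown()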
@ -0,0 +1,9 @@
from . import crypto
from . import SSL
try:
    # pyopenssl tsafe module was deprecated and removed in v20.0.0
    # https://github.com/pyca/pyopenssl/pull/913
    from . import tsafe
except ImportError:
    pass
from .version import __version__
@ -0,0 +1 @@
from OpenSSL.crypto import *
@ -0,0 +1 @@
from OpenSSL.tsafe import *
@ -0,0 +1 @@
from OpenSSL.version import __version__, __doc__
33
venv/lib/python3.12/site-packages/eventlet/green/Queue.py
Normal file
@ -0,0 +1,33 @@
from eventlet import queue

__all__ = ['Empty', 'Full', 'LifoQueue', 'PriorityQueue', 'Queue']

__patched__ = ['LifoQueue', 'PriorityQueue', 'Queue']

# these classes exist to paper over the major operational difference between
# eventlet.queue.Queue and the stdlib equivalents


class Queue(queue.Queue):
    def __init__(self, maxsize=0):
        if maxsize == 0:
            maxsize = None
        super().__init__(maxsize)


class PriorityQueue(queue.PriorityQueue):
    def __init__(self, maxsize=0):
        if maxsize == 0:
            maxsize = None
        super().__init__(maxsize)


class LifoQueue(queue.LifoQueue):
    def __init__(self, maxsize=0):
        if maxsize == 0:
            maxsize = None
        super().__init__(maxsize)


Empty = queue.Empty
Full = queue.Full
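The shim matters only at construction time: stdlib code passes maxsize=0 for "unbounded", which eventlet.queue spells as None. A sketch (not part of the file):

from eventlet.green import Queue as green_queue

q = green_queue.Queue(0)  # 0 translated to None: unbounded, stdlib-style
q.put('a')
q.put('b')
print(q.get(), q.get())   # a b

bounded = green_queue.Queue(1)
bounded.put('x')
print(bounded.full())     # True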
@ -0,0 +1,13 @@
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import urllib

patcher.inject(
    'http.server',
    globals(),
    ('urllib', urllib))

del patcher

if __name__ == '__main__':
    test()
@ -0,0 +1,14 @@
from eventlet import patcher

from eventlet.green import socket
from eventlet.green import select
from eventlet.green import threading

patcher.inject(
    'socketserver',
    globals(),
    ('socket', socket),
    ('select', select),
    ('threading', threading))

# QQQ ForkingMixIn should be fixed to use green waitpid?
@ -0,0 +1 @@
# this package contains modules from the standard library converted to use eventlet
@ -0,0 +1,33 @@
__socket = __import__('socket')

__all__ = __socket.__all__
__patched__ = ['fromfd', 'socketpair', 'ssl', 'socket', 'timeout']

import eventlet.patcher
eventlet.patcher.slurp_properties(__socket, globals(), ignore=__patched__, srckeys=dir(__socket))

os = __import__('os')
import sys
from eventlet import greenio


socket = greenio.GreenSocket
_GLOBAL_DEFAULT_TIMEOUT = greenio._GLOBAL_DEFAULT_TIMEOUT
timeout = greenio.socket_timeout

try:
    __original_fromfd__ = __socket.fromfd

    def fromfd(*args):
        return socket(__original_fromfd__(*args))
except AttributeError:
    pass

try:
    __original_socketpair__ = __socket.socketpair

    def socketpair(*args):
        one, two = __original_socketpair__(*args)
        return socket(one), socket(two)
except AttributeError:
    pass
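Code written against the stdlib socket API runs unchanged on these replacements, but yields to the hub instead of blocking. A sketch (not part of the file; example.com is an arbitrary host):

from eventlet.green import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5.0)  # expiry raises greenio.socket_timeout, aliased above
sock.connect(('example.com', 80))
sock.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
print(sock.recv(64))
sock.close()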
14
venv/lib/python3.12/site-packages/eventlet/green/asynchat.py
Normal file
@ -0,0 +1,14 @@
import sys

if sys.version_info < (3, 12):
    from eventlet import patcher
    from eventlet.green import asyncore
    from eventlet.green import socket

    patcher.inject(
        'asynchat',
        globals(),
        ('asyncore', asyncore),
        ('socket', socket))

    del patcher
16
venv/lib/python3.12/site-packages/eventlet/green/asyncore.py
Normal file
@ -0,0 +1,16 @@
import sys

if sys.version_info < (3, 12):
    from eventlet import patcher
    from eventlet.green import select
    from eventlet.green import socket
    from eventlet.green import time

    patcher.inject(
        "asyncore",
        globals(),
        ('select', select),
        ('socket', socket),
        ('time', time))

    del patcher
38
venv/lib/python3.12/site-packages/eventlet/green/builtin.py
Normal file
@ -0,0 +1,38 @@
"""
In order to detect a filehandle that's been closed, our only clue may be
the operating system returning the same filehandle in response to some
other operation.

The builtins 'file' and 'open' are patched to collaborate with the
notify_opened protocol.
"""

builtins_orig = __builtins__

from eventlet import hubs
from eventlet.hubs import hub
from eventlet.patcher import slurp_properties
import sys

__all__ = dir(builtins_orig)
__patched__ = ['open']
slurp_properties(builtins_orig, globals(),
                 ignore=__patched__, srckeys=dir(builtins_orig))

hubs.get_hub()

__original_open = open
__opening = False


def open(*args, **kwargs):
    global __opening
    result = __original_open(*args, **kwargs)
    if not __opening:
        # This is incredibly ugly. 'open' is used under the hood by
        # the import process. So, ensure we don't wind up in an
        # infinite loop.
        __opening = True
        hubs.notify_opened(result.fileno())
        __opening = False
    return result
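The patched open matters when fd numbers get recycled: notifying the hub clears any stale listener state left over from a previous file or socket with the same number. A sketch (not part of the file; the path is just an example):

from eventlet.green import builtin

# identical to the real open(), plus a hubs.notify_opened(fd) side effect
with builtin.open('/etc/hostname') as f:
    print(f.readline().strip())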
13
venv/lib/python3.12/site-packages/eventlet/green/ftplib.py
Normal file
@ -0,0 +1,13 @@
from eventlet import patcher

# *NOTE: there might be some funny business with the "SOCKS" module
# if it even still exists
from eventlet.green import socket

patcher.inject('ftplib', globals(), ('socket', socket))

del patcher

# Run test program when run as a script
if __name__ == '__main__':
    test()
@ -0,0 +1,189 @@
# This is part of Python source code with Eventlet-specific modifications.
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved" are retained in Python alone or in any derivative version prepared by
# Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee.  This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.

from enum import IntEnum

__all__ = ['HTTPStatus']


class HTTPStatus(IntEnum):
    """HTTP status codes and reason phrases

    Status codes from the following RFCs are all observed:

        * RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
        * RFC 6585: Additional HTTP Status Codes
        * RFC 3229: Delta encoding in HTTP
        * RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
        * RFC 5842: Binding Extensions to WebDAV
        * RFC 7238: Permanent Redirect
        * RFC 2295: Transparent Content Negotiation in HTTP
        * RFC 2774: An HTTP Extension Framework
    """
    def __new__(cls, value, phrase, description=''):
        obj = int.__new__(cls, value)
        obj._value_ = value

        obj.phrase = phrase
        obj.description = description
        return obj

    # informational
    CONTINUE = 100, 'Continue', 'Request received, please continue'
    SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
                           'Switching to new protocol; obey Upgrade header')
    PROCESSING = 102, 'Processing'

    # success
    OK = 200, 'OK', 'Request fulfilled, document follows'
    CREATED = 201, 'Created', 'Document created, URL follows'
    ACCEPTED = (202, 'Accepted',
                'Request accepted, processing continues off-line')
    NON_AUTHORITATIVE_INFORMATION = (203, 'Non-Authoritative Information',
                                     'Request fulfilled from cache')
    NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
    RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
    PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
    MULTI_STATUS = 207, 'Multi-Status'
    ALREADY_REPORTED = 208, 'Already Reported'
    IM_USED = 226, 'IM Used'

    # redirection
    MULTIPLE_CHOICES = (300, 'Multiple Choices',
                        'Object has several resources -- see URI list')
    MOVED_PERMANENTLY = (301, 'Moved Permanently',
                         'Object moved permanently -- see URI list')
    FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
    SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
    NOT_MODIFIED = (304, 'Not Modified',
                    'Document has not changed since given time')
    USE_PROXY = (305, 'Use Proxy',
                 'You must use proxy specified in Location to access this resource')
    TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
                          'Object moved temporarily -- see URI list')
    PERMANENT_REDIRECT = (308, 'Permanent Redirect',
                          'Object moved permanently -- see URI list')

    # client error
    BAD_REQUEST = (400, 'Bad Request',
                   'Bad request syntax or unsupported method')
    UNAUTHORIZED = (401, 'Unauthorized',
                    'No permission -- see authorization schemes')
    PAYMENT_REQUIRED = (402, 'Payment Required',
                        'No payment -- see charging schemes')
    FORBIDDEN = (403, 'Forbidden',
                 'Request forbidden -- authorization will not help')
    NOT_FOUND = (404, 'Not Found',
                 'Nothing matches the given URI')
    METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
                          'Specified method is invalid for this resource')
    NOT_ACCEPTABLE = (406, 'Not Acceptable',
                      'URI not available in preferred format')
    PROXY_AUTHENTICATION_REQUIRED = (407, 'Proxy Authentication Required',
                                     'You must authenticate with this proxy before proceeding')
    REQUEST_TIMEOUT = (408, 'Request Timeout',
                       'Request timed out; try again later')
    CONFLICT = 409, 'Conflict', 'Request conflict'
    GONE = (410, 'Gone',
            'URI no longer exists and has been permanently removed')
    LENGTH_REQUIRED = (411, 'Length Required',
                       'Client must specify Content-Length')
    PRECONDITION_FAILED = (412, 'Precondition Failed',
                           'Precondition in headers is false')
    REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
                                'Entity is too large')
    REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
                            'URI is too long')
    UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
                              'Entity body in unsupported format')
    REQUESTED_RANGE_NOT_SATISFIABLE = (416, 'Requested Range Not Satisfiable',
                                       'Cannot satisfy request range')
    EXPECTATION_FAILED = (417, 'Expectation Failed',
                          'Expect condition could not be satisfied')
    UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
    LOCKED = 423, 'Locked'
    FAILED_DEPENDENCY = 424, 'Failed Dependency'
    UPGRADE_REQUIRED = 426, 'Upgrade Required'
    PRECONDITION_REQUIRED = (428, 'Precondition Required',
                             'The origin server requires the request to be conditional')
    TOO_MANY_REQUESTS = (429, 'Too Many Requests',
                         'The user has sent too many requests in '
                         'a given amount of time ("rate limiting")')
    REQUEST_HEADER_FIELDS_TOO_LARGE = (431, 'Request Header Fields Too Large',
                                       'The server is unwilling to process the request because its header '
                                       'fields are too large')

    # server errors
    INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
                             'Server got itself in trouble')
    NOT_IMPLEMENTED = (501, 'Not Implemented',
                       'Server does not support this operation')
    BAD_GATEWAY = (502, 'Bad Gateway',
                   'Invalid responses from another server/proxy')
    SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
                           'The server cannot process the request due to a high load')
    GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
                       'The gateway server did not receive a timely response')
    HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
                                  'Cannot fulfill request')
    VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
    INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
    LOOP_DETECTED = 508, 'Loop Detected'
    NOT_EXTENDED = 510, 'Not Extended'
    NETWORK_AUTHENTICATION_REQUIRED = (511, 'Network Authentication Required',
                                       'The client needs to authenticate to gain network access')
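Because __new__ attaches phrase and description, each member behaves as an int with metadata. A sketch (not part of the file), assuming this module is importable as eventlet.green.http:

from eventlet.green.http import HTTPStatus

s = HTTPStatus.NOT_FOUND
print(int(s), s.phrase)  # 404 Not Found
print(s.description)     # Nothing matches the given URI
print(s == 404)          # True -- IntEnum members compare equal to plain ints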
1578
venv/lib/python3.12/site-packages/eventlet/green/http/client.py
Normal file
File diff suppressed because it is too large
2152
venv/lib/python3.12/site-packages/eventlet/green/http/cookiejar.py
Normal file
File diff suppressed because it is too large
691
venv/lib/python3.12/site-packages/eventlet/green/http/cookies.py
Normal file
@ -0,0 +1,691 @@
|
||||
# This is part of Python source code with Eventlet-specific modifications.
|
||||
#
|
||||
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
||||
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
|
||||
# Reserved
|
||||
#
|
||||
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
||||
# --------------------------------------------
|
||||
#
|
||||
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
|
||||
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
|
||||
# otherwise using this software ("Python") in source or binary form and
|
||||
# its associated documentation.
|
||||
#
|
||||
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
|
||||
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
||||
# analyze, test, perform and/or display publicly, prepare derivative works,
|
||||
# distribute, and otherwise use Python alone or in any derivative version,
|
||||
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
|
||||
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
||||
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
|
||||
# Reserved" are retained in Python alone or in any derivative version prepared by
|
||||
# Licensee.
|
||||
#
|
||||
# 3. In the event Licensee prepares a derivative work that is based on
|
||||
# or incorporates Python or any part thereof, and wants to make
|
||||
# the derivative work available to others as provided herein, then
|
||||
# Licensee hereby agrees to include in any such work a brief summary of
|
||||
# the changes made to Python.
|
||||
#
|
||||
# 4. PSF is making Python available to Licensee on an "AS IS"
|
||||
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
||||
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
|
||||
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
||||
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
|
||||
# INFRINGE ANY THIRD PARTY RIGHTS.
|
||||
#
|
||||
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
|
||||
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
|
||||
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
|
||||
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
||||
#
|
||||
# 6. This License Agreement will automatically terminate upon a material
|
||||
# breach of its terms and conditions.
|
||||
#
|
||||
# 7. Nothing in this License Agreement shall be deemed to create any
|
||||
# relationship of agency, partnership, or joint venture between PSF and
|
||||
# Licensee. This License Agreement does not grant permission to use PSF
|
||||
# trademarks or trade name in a trademark sense to endorse or promote
|
||||
# products or services of Licensee, or any third party.
|
||||
#
|
||||
# 8. By copying, installing or otherwise using Python, Licensee
|
||||
# agrees to be bound by the terms and conditions of this License
|
||||
# Agreement.
|
||||
####
|
||||
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Permission to use, copy, modify, and distribute this software
|
||||
# and its documentation for any purpose and without fee is hereby
|
||||
# granted, provided that the above copyright notice appear in all
|
||||
# copies and that both that copyright notice and this permission
|
||||
# notice appear in supporting documentation, and that the name of
|
||||
# Timothy O'Malley not be used in advertising or publicity
|
||||
# pertaining to distribution of the software without specific, written
|
||||
# prior permission.
|
||||
#
|
||||
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
|
||||
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
|
||||
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
||||
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
||||
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
# PERFORMANCE OF THIS SOFTWARE.
|
||||
#
|
||||
####
|
||||
#
|
||||
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
|
||||
# by Timothy O'Malley <timo@alum.mit.edu>
|
||||
#
|
||||
# Cookie.py is a Python module for the handling of HTTP
|
||||
# cookies as a Python dictionary. See RFC 2109 for more
|
||||
# information on cookies.
|
||||
#
|
||||
# The original idea to treat Cookies as a dictionary came from
|
||||
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
|
||||
# first version of nscookie.py.
|
||||
#
|
||||
####
|
||||
|
||||
r"""
|
||||
Here's a sample session to show how to use this module.
|
||||
At the moment, this is the only documentation.
|
||||
|
||||
The Basics
|
||||
----------
|
||||
|
||||
Importing is easy...
|
||||
|
||||
>>> from http import cookies
|
||||
|
||||
Most of the time you start by creating a cookie.
|
||||
|
||||
>>> C = cookies.SimpleCookie()
|
||||
|
||||
Once you've created your Cookie, you can add values just as if it were
|
||||
a dictionary.
|
||||
|
||||
>>> C = cookies.SimpleCookie()
|
||||
>>> C["fig"] = "newton"
|
||||
>>> C["sugar"] = "wafer"
|
||||
>>> C.output()
|
||||
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
|
||||
|
||||
Notice that the printable representation of a Cookie is the
|
||||
appropriate format for a Set-Cookie: header. This is the
|
||||
default behavior. You can change the header and printed
|
||||
attributes by using the .output() function
|
||||
|
||||
>>> C = cookies.SimpleCookie()
|
||||
>>> C["rocky"] = "road"
|
||||
>>> C["rocky"]["path"] = "/cookie"
|
||||
>>> print(C.output(header="Cookie:"))
|
||||
Cookie: rocky=road; Path=/cookie
|
||||
>>> print(C.output(attrs=[], header="Cookie:"))
|
||||
Cookie: rocky=road
|
||||
|
||||
The load() method of a Cookie extracts cookies from a string. In a
|
||||
CGI script, you would use this method to extract the cookies from the
|
||||
HTTP_COOKIE environment variable.
|
||||
|
||||
>>> C = cookies.SimpleCookie()
|
||||
>>> C.load("chips=ahoy; vienna=finger")
|
||||
>>> C.output()
|
||||
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
|
||||
|
||||
The load() method is darn-tootin smart about identifying cookies
|
||||
within a string. Escaped quotation marks, nested semicolons, and other
|
||||
such trickeries do not confuse it.
|
||||
|
||||
>>> C = cookies.SimpleCookie()
|
||||
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
|
||||
>>> print(C)
|
||||
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
|
||||
|
||||
Each element of the Cookie also supports all of the RFC 2109
|
||||
Cookie attributes. Here's an example which sets the Path
|
||||
attribute.
|
||||
|
||||
>>> C = cookies.SimpleCookie()
|
||||
>>> C["oreo"] = "doublestuff"
|
||||
>>> C["oreo"]["path"] = "/"
|
||||
>>> print(C)
|
||||
Set-Cookie: oreo=doublestuff; Path=/
|
||||
|
||||
Each dictionary element has a 'value' attribute, which gives you
|
||||
back the value associated with the key.
|
||||
|
||||
>>> C = cookies.SimpleCookie()
|
||||
>>> C["twix"] = "none for you"
|
||||
>>> C["twix"].value
|
||||
'none for you'
|
||||
|
||||
The SimpleCookie expects that all values should be standard strings.
|
||||
Just to be sure, SimpleCookie invokes the str() builtin to convert
|
||||
the value to a string, when the values are set dictionary-style.
|
||||
|
||||
>>> C = cookies.SimpleCookie()
|
||||
>>> C["number"] = 7
|
||||
>>> C["string"] = "seven"
|
||||
>>> C["number"].value
|
||||
'7'
|
||||
>>> C["string"].value
|
||||
'seven'
|
||||
>>> C.output()
|
||||
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
|
||||
|
||||
Finis.
|
||||
"""
|
||||
|
||||
#
# Import our required modules
#
import re
import string

__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]

_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join


def _warn_deprecated_setter(setter):
    import warnings
    msg = ('The .%s setter is deprecated. The attribute will be read-only in '
           'future releases. Please use the set() method instead.' % setter)
    warnings.warn(msg, DeprecationWarning, stacklevel=3)

#
# Define an exception visible to external modules
#
class CookieError(Exception):
    pass


# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068.  They provide
# a two-way quoting algorithm.  Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character.  Any '\' or '"' is
# quoted with a preceding '\' slash.
# Because of the way browsers really handle cookies (as opposed to what
# the RFC says) we also encode "," and ";".
#
# These are taken from RFC2068 and RFC2109.
#       _LegalChars       is the list of chars which don't require "'s
#       _Translator       hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_UnescapedChars = _LegalChars + ' ()/<=>?@[]{}'

_Translator = {n: '\\%03o' % n
               for n in set(range(256)) - set(map(ord, _UnescapedChars))}
_Translator.update({
    ord('"'): '\\"',
    ord('\\'): '\\\\',
})

# Eventlet change: match used instead of fullmatch for Python 3.3 compatibility
_is_legal_key = re.compile(r'[%s]+\Z' % re.escape(_LegalChars)).match


def _quote(str):
    r"""Quote a string for use in a cookie header.

    If the string does not need to be double-quoted, then just return the
    string.  Otherwise, surround the string in doublequotes and quote
    (with a \) special characters.
    """
    if str is None or _is_legal_key(str):
        return str
    else:
        return '"' + str.translate(_Translator) + '"'


_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")


def _unquote(str):
    # If there aren't any doublequotes,
    # then there can't be any special characters.  See RFC 2109.
    if str is None or len(str) < 2:
        return str
    if str[0] != '"' or str[-1] != '"':
        return str

    # We have to assume that we must decode this string.
    # Down to work.

    # Remove the "s
    str = str[1:-1]

    # Check for special sequences.  Examples:
    #    \012 --> \n
    #    \"   --> "
    #
    i = 0
    n = len(str)
    res = []
    while 0 <= i < n:
        o_match = _OctalPatt.search(str, i)
        q_match = _QuotePatt.search(str, i)
        if not o_match and not q_match:             # Neither matched
            res.append(str[i:])
            break
        # else:
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        if q_match and (not o_match or k < j):      # QuotePatt matched
            res.append(str[i:k])
            res.append(str[k+1])
            i = k + 2
        else:                                       # OctalPatt matched
            res.append(str[i:j])
            res.append(chr(int(str[j+1:j+4], 8)))
            i = j + 4
    return _nulljoin(res)
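
# Editor's sketch (not part of the original module): a minimal check of the
# two-way quoting algorithm above. ';' (octal 073) is escaped by _quote on the
# way out and restored by _unquote on the way back in.
if __name__ == "__main__":
    assert _quote("abc") == "abc"             # legal chars pass through untouched
    assert _quote("a;b") == '"a\\073b"'       # ';' becomes the octal escape \073
    assert _unquote(_quote("a;b")) == "a;b"   # the round-trip is lossless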

# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header.  By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header.  The one optional argument is an
# offset from now, in seconds.  For example, an offset of -3600 means "one hour
# ago".  The offset may be a floating point number.
#

_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

_monthname = [None,
              'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']


def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
    from eventlet.green.time import gmtime, time
    now = time()
    year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
           (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
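
# Editor's sketch (illustrative, not in the original file): the exact output of
# _getdate() depends on the clock, but the shape is always the "expires"
# format described in the comment above.
if __name__ == "__main__":
    print(_getdate())       # e.g. Thu, 01 Jan 2026 00:00:00 GMT
    print(_getdate(-3600))  # same format, one hour in the past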


class Morsel(dict):
    """A class to hold ONE (key, value) pair.

    In a cookie, each such pair may have several attributes, so this class is
    used to keep the attributes associated with the appropriate key,value pair.
    This class also includes a coded_value attribute, which is used to hold
    the network representation of the value.  This is most useful when Python
    objects are pickled for network transit.
    """
    # RFC 2109 lists these attributes as reserved:
    #   path       comment         domain
    #   max-age    secure          version
    #
    # For historical reasons, these attributes are also reserved:
    #   expires
    #
    # This is an extension from Microsoft:
    #   httponly
    #
    # This dictionary provides a mapping from the lowercase
    # variant on the left to the appropriate traditional
    # formatting on the right.
    _reserved = {
        "expires"  : "expires",
        "path"     : "Path",
        "comment"  : "Comment",
        "domain"   : "Domain",
        "max-age"  : "Max-Age",
        "secure"   : "Secure",
        "httponly" : "HttpOnly",
        "version"  : "Version",
    }

    _flags = {'secure', 'httponly'}

    def __init__(self):
        # Set defaults
        self._key = self._value = self._coded_value = None

        # Set default attributes
        for key in self._reserved:
            dict.__setitem__(self, key, "")

    @property
    def key(self):
        return self._key

    @key.setter
    def key(self, key):
        _warn_deprecated_setter('key')
        self._key = key

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        _warn_deprecated_setter('value')
        self._value = value

    @property
    def coded_value(self):
        return self._coded_value

    @coded_value.setter
    def coded_value(self, coded_value):
        _warn_deprecated_setter('coded_value')
        self._coded_value = coded_value

    def __setitem__(self, K, V):
        K = K.lower()
        if K not in self._reserved:
            raise CookieError("Invalid attribute %r" % (K,))
        dict.__setitem__(self, K, V)

    def setdefault(self, key, val=None):
        key = key.lower()
        if key not in self._reserved:
            raise CookieError("Invalid attribute %r" % (key,))
        return dict.setdefault(self, key, val)

    def __eq__(self, morsel):
        if not isinstance(morsel, Morsel):
            return NotImplemented
        return (dict.__eq__(self, morsel) and
                self._value == morsel._value and
                self._key == morsel._key and
                self._coded_value == morsel._coded_value)

    __ne__ = object.__ne__

    def copy(self):
        morsel = Morsel()
        dict.update(morsel, self)
        morsel.__dict__.update(self.__dict__)
        return morsel

    def update(self, values):
        data = {}
        for key, val in dict(values).items():
            key = key.lower()
            if key not in self._reserved:
                raise CookieError("Invalid attribute %r" % (key,))
            data[key] = val
        dict.update(self, data)

    def isReservedKey(self, K):
        return K.lower() in self._reserved

    def set(self, key, val, coded_val, LegalChars=_LegalChars):
        if LegalChars != _LegalChars:
            import warnings
            warnings.warn(
                'LegalChars parameter is deprecated, ignored and will '
                'be removed in future versions.', DeprecationWarning,
                stacklevel=2)

        if key.lower() in self._reserved:
            raise CookieError('Attempt to set a reserved key %r' % (key,))
        if not _is_legal_key(key):
            raise CookieError('Illegal key %r' % (key,))

        # It's a good key, so save it.
        self._key = key
        self._value = val
        self._coded_value = coded_val

    def __getstate__(self):
        return {
            'key': self._key,
            'value': self._value,
            'coded_value': self._coded_value,
        }

    def __setstate__(self, state):
        self._key = state['key']
        self._value = state['value']
        self._coded_value = state['coded_value']

    def output(self, attrs=None, header="Set-Cookie:"):
        return "%s %s" % (header, self.OutputString(attrs))

    __str__ = output

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.OutputString())

    def js_output(self, attrs=None):
        # Print javascript
        return """
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = \"%s\";
        // end hiding -->
        </script>
        """ % (self.OutputString(attrs).replace('"', r'\"'))

    def OutputString(self, attrs=None):
        # Build up our result
        #
        result = []
        append = result.append

        # First, the key=value pair
        append("%s=%s" % (self.key, self.coded_value))

        # Now add any defined attributes
        if attrs is None:
            attrs = self._reserved
        items = sorted(self.items())
        for key, value in items:
            if value == "":
                continue
            if key not in attrs:
                continue
            if key == "expires" and isinstance(value, int):
                append("%s=%s" % (self._reserved[key], _getdate(value)))
            elif key == "max-age" and isinstance(value, int):
                append("%s=%d" % (self._reserved[key], value))
            elif key in self._flags:
                if value:
                    append(str(self._reserved[key]))
            else:
                append("%s=%s" % (self._reserved[key], value))

        # Return the result
        return _semispacejoin(result)
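
# Editor's sketch (not part of the original module): how a Morsel renders.
# Flag attributes such as httponly appear bare, while the rest render as
# name=value pairs, sorted by attribute name.
if __name__ == "__main__":
    m = Morsel()
    m.set("sid", "abc123", "abc123")  # key, decoded value, coded value
    m["path"] = "/"
    m["httponly"] = True
    print(m.output())  # Set-Cookie: sid=abc123; HttpOnly; Path=/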


#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications.  I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs.  As a
# result, the parsing rules here are less strict.
#

_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
_LegalValueChars = _LegalKeyChars + r'\[\]'
_CookiePattern = re.compile(r"""
    (?x)                                   # This is a verbose pattern
    \s*                                    # Optional whitespace at start of cookie
    (?P<key>                               # Start of group 'key'
    [""" + _LegalKeyChars + r"""]+?        # Any word of at least one letter
    )                                      # End of group 'key'
    (                                      # Optional group: there may not be a value.
    \s*=\s*                                # Equal Sign
    (?P<val>                               # Start of group 'val'
    "(?:[^\\"]|\\.)*"                      # Any doublequoted string
    |                                      # or
    \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
    |                                      # or
    [""" + _LegalValueChars + r"""]*       # Any word or empty string
    )                                      # End of group 'val'
    )?                                     # End of optional value group
    \s*                                    # Any number of spaces.
    (\s+|;|$)                              # Ending either at space, semicolon, or EOS.
    """, re.ASCII)                         # May be removed if safe.


# At long last, here is the cookie class.  Using this class is almost just like
# using a dictionary.  See this module's docstring for example usage.
#
class BaseCookie(dict):
    """A container class for a set of Morsels."""

    def value_decode(self, val):
        """real_value, coded_value = value_decode(STRING)
        Called prior to setting a cookie's value from the network
        representation.  The VALUE is the value read from HTTP
        header.
        Override this function to modify the behavior of cookies.
        """
        return val, val

    def value_encode(self, val):
        """real_value, coded_value = value_encode(VALUE)
        Called prior to setting a cookie's value from the dictionary
        representation.  The VALUE is the value being assigned.
        Override this function to modify the behavior of cookies.
        """
        strval = str(val)
        return strval, strval

    def __init__(self, input=None):
        if input:
            self.load(input)

    def __set(self, key, real_value, coded_value):
        """Private method for setting a cookie's value"""
        M = self.get(key, Morsel())
        M.set(key, real_value, coded_value)
        dict.__setitem__(self, key, M)

    def __setitem__(self, key, value):
        """Dictionary style assignment."""
        if isinstance(value, Morsel):
            # allow assignment of constructed Morsels (e.g. for pickling)
            dict.__setitem__(self, key, value)
        else:
            rval, cval = self.value_encode(value)
            self.__set(key, rval, cval)

    def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
        """Return a string suitable for HTTP."""
        result = []
        items = sorted(self.items())
        for key, value in items:
            result.append(value.output(attrs, header))
        return sep.join(result)

    __str__ = output

    def __repr__(self):
        l = []
        items = sorted(self.items())
        for key, value in items:
            l.append('%s=%s' % (key, repr(value.value)))
        return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))

    def js_output(self, attrs=None):
        """Return a string suitable for JavaScript."""
        result = []
        items = sorted(self.items())
        for key, value in items:
            result.append(value.js_output(attrs))
        return _nulljoin(result)

    def load(self, rawdata):
        """Load cookies from a string (presumably HTTP_COOKIE) or
        from a dictionary.  Loading cookies from a dictionary 'd'
        is equivalent to calling:
            map(Cookie.__setitem__, d.keys(), d.values())
        """
        if isinstance(rawdata, str):
            self.__parse_string(rawdata)
        else:
            # self.update() wouldn't call our custom __setitem__
            for key, value in rawdata.items():
                self[key] = value
        return

    def __parse_string(self, str, patt=_CookiePattern):
        i = 0                 # Our starting point
        n = len(str)          # Length of string
        parsed_items = []     # Parsed (type, key, value) triples
        morsel_seen = False   # A key=value pair was previously encountered

        TYPE_ATTRIBUTE = 1
        TYPE_KEYVALUE = 2

        # We first parse the whole cookie string and reject it if it's
        # syntactically invalid (this helps avoid some classes of injection
        # attacks).
        while 0 <= i < n:
            # Start looking for a cookie
            match = patt.match(str, i)
            if not match:
                # No more cookies
                break

            key, value = match.group("key"), match.group("val")
            i = match.end(0)

            if key[0] == "$":
                if not morsel_seen:
                    # We ignore attributes which pertain to the cookie
                    # mechanism as a whole, such as "$Version".
                    # See RFC 2965. (Does anyone care?)
                    continue
                parsed_items.append((TYPE_ATTRIBUTE, key[1:], value))
            elif key.lower() in Morsel._reserved:
                if not morsel_seen:
                    # Invalid cookie string
                    return
                if value is None:
                    if key.lower() in Morsel._flags:
                        parsed_items.append((TYPE_ATTRIBUTE, key, True))
                    else:
                        # Invalid cookie string
                        return
                else:
                    parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value)))
            elif value is not None:
                parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value)))
                morsel_seen = True
            else:
                # Invalid cookie string
                return

        # The cookie string is valid, apply it.
        M = None         # current morsel
        for tp, key, value in parsed_items:
            if tp == TYPE_ATTRIBUTE:
                assert M is not None
                M[key] = value
            else:
                assert tp == TYPE_KEYVALUE
                rval, cval = value
                self.__set(key, rval, cval)
                M = self[key]


class SimpleCookie(BaseCookie):
    """
    SimpleCookie supports strings as cookie values.  When setting
    the value using the dictionary assignment notation, SimpleCookie
    calls the builtin str() to convert the value to a string.  Values
    received from HTTP are kept as strings.
    """
    def value_decode(self, val):
        return _unquote(val), val

    def value_encode(self, val):
        strval = str(val)
        return strval, _quote(strval)
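
# Editor's sketch (not part of the original module): SimpleCookie quotes on the
# way out and unquotes on the way in, unlike the plain BaseCookie round-trip.
if __name__ == "__main__":
    jar = SimpleCookie()
    jar["keebler"] = 'E=mc2; L="Loves"'
    header = jar.output()                 # value is quoted for the wire
    print(header)
    jar2 = SimpleCookie(header.split(": ", 1)[1])
    print(jar2["keebler"].value)          # E=mc2; L="Loves"  (decoded back)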
1266
venv/lib/python3.12/site-packages/eventlet/green/http/server.py
Normal file
File diff suppressed because it is too large
18
venv/lib/python3.12/site-packages/eventlet/green/httplib.py
Normal file
@ -0,0 +1,18 @@
from eventlet import patcher
from eventlet.green import socket

to_patch = [('socket', socket)]

try:
    from eventlet.green import ssl
    to_patch.append(('ssl', ssl))
except ImportError:
    pass

from eventlet.green.http import client
for name in dir(client):
    if name not in patcher.__exclude:
        globals()[name] = getattr(client, name)

if __name__ == '__main__':
    test()
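
# Editor's sketch (not part of the original module): the module re-exports the
# green http.client names, so existing httplib-style code keeps working while
# network waits yield to other greenthreads.
def _demo_head_request(host="example.com"):  # hypothetical helper, for illustration
    conn = HTTPConnection(host, 80, timeout=5)  # copied in from green http.client above
    conn.request("HEAD", "/")
    status = conn.getresponse().status
    conn.close()
    return status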
133
venv/lib/python3.12/site-packages/eventlet/green/os.py
Normal file
@ -0,0 +1,133 @@
os_orig = __import__("os")
import errno
socket = __import__("socket")
from stat import S_ISREG

from eventlet import greenio
from eventlet.support import get_errno
from eventlet import greenthread
from eventlet import hubs
from eventlet.patcher import slurp_properties

__all__ = os_orig.__all__
__patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open']

slurp_properties(
    os_orig,
    globals(),
    ignore=__patched__,
    srckeys=dir(os_orig))


def fdopen(fd, *args, **kw):
    """fdopen(fd [, mode='r' [, bufsize]]) -> file_object

    Return an open file object connected to a file descriptor."""
    if not isinstance(fd, int):
        raise TypeError('fd should be int, not %r' % fd)
    try:
        return greenio.GreenPipe(fd, *args, **kw)
    except OSError as e:
        raise OSError(*e.args)


__original_read__ = os_orig.read


def read(fd, n):
    """read(fd, buffersize) -> string

    Read a file descriptor."""
    while True:
        # don't wait to read for regular files
        # select/poll will always return True while epoll will simply crash
        st_mode = os_orig.stat(fd).st_mode
        if not S_ISREG(st_mode):
            try:
                hubs.trampoline(fd, read=True)
            except hubs.IOClosed:
                return ''

        try:
            return __original_read__(fd, n)
        except OSError as e:
            if get_errno(e) == errno.EPIPE:
                return ''
            if get_errno(e) != errno.EAGAIN:
                raise


__original_write__ = os_orig.write


def write(fd, st):
    """write(fd, string) -> byteswritten

    Write a string to a file descriptor.
    """
    while True:
        # don't wait to write for regular files
        # select/poll will always return True while epoll will simply crash
        st_mode = os_orig.stat(fd).st_mode
        if not S_ISREG(st_mode):
            try:
                hubs.trampoline(fd, write=True)
            except hubs.IOClosed:
                return 0

        try:
            return __original_write__(fd, st)
        except OSError as e:
            if get_errno(e) not in [errno.EAGAIN, errno.EPIPE]:
                raise


def wait():
    """wait() -> (pid, status)

    Wait for completion of a child process."""
    return waitpid(0, 0)


__original_waitpid__ = os_orig.waitpid


def waitpid(pid, options):
    """waitpid(...)
    waitpid(pid, options) -> (pid, status)

    Wait for completion of a given child process."""
    if options & os_orig.WNOHANG != 0:
        return __original_waitpid__(pid, options)
    else:
        new_options = options | os_orig.WNOHANG
        while True:
            rpid, status = __original_waitpid__(pid, new_options)
            if rpid and status >= 0:
                return rpid, status
            greenthread.sleep(0.01)


__original_open__ = os_orig.open


def open(file, flags, mode=0o777, dir_fd=None):
    """Wrap os.open
    This behaves identically, but collaborates with
    the hub's notify_opened protocol.
    """
    # pathlib workaround #534 pathlib._NormalAccessor wraps `open` in
    # `staticmethod` for py < 3.7 but not 3.7. That means we get here with
    # `file` being a pathlib._NormalAccessor object, and the other arguments
    # shifted. Fortunately pathlib doesn't use the `dir_fd` argument, so we
    # have space in the parameter list. We use some heuristics to detect this
    # and adjust the parameters (without importing pathlib)
    if type(file).__name__ == '_NormalAccessor':
        file, flags, mode, dir_fd = flags, mode, dir_fd, None

    if dir_fd is not None:
        fd = __original_open__(file, flags, mode, dir_fd=dir_fd)
    else:
        fd = __original_open__(file, flags, mode)
    hubs.notify_opened(fd)
    return fd
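
# Editor's sketch (not part of the original module, POSIX-only): the patched
# wait/waitpid poll with WNOHANG and sleep cooperatively, so waiting on a
# child does not freeze the hub.
def _demo_wait_child():  # hypothetical helper, for illustration
    pid = fork()   # fork/_exit come from the real os module via slurp_properties
    if pid == 0:
        _exit(0)   # child exits immediately
    return waitpid(pid, 0)  # parent yields to other greenthreads while polling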
257
venv/lib/python3.12/site-packages/eventlet/green/profile.py
Normal file
@ -0,0 +1,257 @@
# Copyright (c) 2010, CCP Games
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of CCP Games nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""This module is API-equivalent to the standard library :mod:`profile` module
but it is greenthread-aware as well as thread-aware.  Use this module
to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
FIXME: No testcases for this module.
"""

profile_orig = __import__('profile')
__all__ = profile_orig.__all__

from eventlet.patcher import slurp_properties
slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))

import sys
import functools

from eventlet import greenthread
from eventlet import patcher
import _thread

thread = patcher.original(_thread.__name__)  # non-monkeypatched module needed


# This class provides the start() and stop() functions
class Profile(profile_orig.Profile):
    base = profile_orig.Profile

    def __init__(self, timer=None, bias=None):
        self.current_tasklet = greenthread.getcurrent()
        self.thread_id = thread.get_ident()
        self.base.__init__(self, timer, bias)
        self.sleeping = {}

    def __call__(self, *args):
        """make callable, allowing an instance to be the profiler"""
        self.dispatcher(*args)

    def _setup(self):
        self._has_setup = True
        self.cur = None
        self.timings = {}
        self.current_tasklet = greenthread.getcurrent()
        self.thread_id = thread.get_ident()
        self.simulate_call("profiler")

    def start(self, name="start"):
        if getattr(self, "running", False):
            return
        self._setup()
        self.simulate_call("start")
        self.running = True
        sys.setprofile(self.dispatcher)

    def stop(self):
        sys.setprofile(None)
        self.running = False
        self.TallyTimings()

    # special cases for the original run commands, making sure to
    # clear the timer context.
    def runctx(self, cmd, globals, locals):
        if not getattr(self, "_has_setup", False):
            self._setup()
        try:
            return profile_orig.Profile.runctx(self, cmd, globals, locals)
        finally:
            self.TallyTimings()

    def runcall(self, func, *args, **kw):
        if not getattr(self, "_has_setup", False):
            self._setup()
        try:
            return profile_orig.Profile.runcall(self, func, *args, **kw)
        finally:
            self.TallyTimings()

    def trace_dispatch_return_extend_back(self, frame, t):
        """A hack function to override error checking in parent class.  It
        allows invalid returns (where frames weren't previously entered into
        the profiler) which can happen for all the tasklets that suddenly start
        to get monitored. This means that the time will eventually be attributed
        to a call high in the chain, when there is a tasklet switch
        """
        if isinstance(self.cur[-2], Profile.fake_frame):
            return False
        self.trace_dispatch_call(frame, 0)
        return self.trace_dispatch_return(frame, t)

    def trace_dispatch_c_return_extend_back(self, frame, t):
        # same for c return
        if isinstance(self.cur[-2], Profile.fake_frame):
            return False  # ignore bogus returns
        self.trace_dispatch_c_call(frame, 0)
        return self.trace_dispatch_return(frame, t)

    def SwitchTasklet(self, t0, t1, t):
        # tally the time spent in the old tasklet
        pt, it, et, fn, frame, rcur = self.cur
        cur = (pt, it + t, et, fn, frame, rcur)

        # we are switching to a new tasklet, store the old
        self.sleeping[t0] = cur, self.timings
        self.current_tasklet = t1

        # find the new one
        try:
            self.cur, self.timings = self.sleeping.pop(t1)
        except KeyError:
            self.cur, self.timings = None, {}
            self.simulate_call("profiler")
            self.simulate_call("new_tasklet")

    def TallyTimings(self):
        oldtimings = self.sleeping
        self.sleeping = {}

        # first, unwind the main "cur"
        self.cur = self.Unwind(self.cur, self.timings)

        # we must keep the timings dicts separate for each tasklet, since it contains
        # the 'ns' item, recursion count of each function in that tasklet.  This is
        # used in the Unwind function.
        for tasklet, (cur, timings) in oldtimings.items():
            self.Unwind(cur, timings)

            for k, v in timings.items():
                if k not in self.timings:
                    self.timings[k] = v
                else:
                    # accumulate all to the self.timings
                    cc, ns, tt, ct, callers = self.timings[k]
                    # ns should be 0 after unwinding
                    cc += v[0]
                    tt += v[2]
                    ct += v[3]
                    for k1, v1 in v[4].items():
                        callers[k1] = callers.get(k1, 0) + v1
                    self.timings[k] = cc, ns, tt, ct, callers

    def Unwind(self, cur, timings):
        "A function to unwind a 'cur' frame and tally the results"
        "see profile.trace_dispatch_return() for details"
        # also see simulate_cmd_complete()
        while cur[-1]:
            rpt, rit, ret, rfn, frame, rcur = cur
            frame_total = rit + ret

            if rfn in timings:
                cc, ns, tt, ct, callers = timings[rfn]
            else:
                cc, ns, tt, ct, callers = 0, 0, 0, 0, {}

            if not ns:
                ct = ct + frame_total
                cc = cc + 1

            if rcur:
                ppt, pit, pet, pfn, pframe, pcur = rcur
            else:
                pfn = None

            if pfn in callers:
                callers[pfn] = callers[pfn] + 1  # hack: gather more
            elif pfn:
                callers[pfn] = 1

            timings[rfn] = cc, ns - 1, tt + rit, ct, callers

            ppt, pit, pet, pfn, pframe, pcur = rcur
            rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
            cur = rcur
        return cur


def ContextWrap(f):
    @functools.wraps(f)
    def ContextWrapper(self, arg, t):
        current = greenthread.getcurrent()
        if current != self.current_tasklet:
            self.SwitchTasklet(self.current_tasklet, current, t)
            t = 0.0  # the time was billed to the previous tasklet
        return f(self, arg, t)
    return ContextWrapper


# Add "return safety" to the dispatchers
Profile.dispatch = dict(profile_orig.Profile.dispatch, **{
    'return': Profile.trace_dispatch_return_extend_back,
    'c_return': Profile.trace_dispatch_c_return_extend_back,
})
# Add automatic tasklet detection to the callbacks.
Profile.dispatch = {k: ContextWrap(v) for k, v in Profile.dispatch.items()}


# run statements shamelessly stolen from profile.py
def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name.  In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    prof = Profile()
    try:
        prof = prof.run(statement)
    except SystemExit:
        pass
    if filename is not None:
        prof.dump_stats(filename)
    else:
        return prof.print_stats(sort)


def runctx(statement, globals, locals, filename=None):
    """Run statement under profiler, supplying your own globals and locals,
    optionally saving results in filename.

    statement and filename have the same semantics as profile.run
    """
    prof = Profile()
    try:
        prof = prof.runctx(statement, globals, locals)
    except SystemExit:
        pass

    if filename is not None:
        prof.dump_stats(filename)
    else:
        return prof.print_stats()
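
# Editor's sketch (not part of the original module): explicit start()/stop()
# profiling of an eventlet application, crossing a greenthread switch.
def _demo_profile():  # hypothetical helper, for illustration
    import eventlet
    p = Profile()
    p.start()
    eventlet.spawn(eventlet.sleep, 0.01).wait()  # work that switches greenthreads
    p.stop()
    p.print_stats()  # inherited from the stdlib profile.Profile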
86
venv/lib/python3.12/site-packages/eventlet/green/select.py
Normal file
@ -0,0 +1,86 @@
import eventlet
from eventlet.hubs import get_hub
__select = eventlet.patcher.original('select')
error = __select.error


__patched__ = ['select']
__deleted__ = ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']


def get_fileno(obj):
    # The purpose of this function is to exactly replicate
    # the behavior of the select module when confronted with
    # abnormal filenos; the details are extensively tested in
    # the stdlib test/test_select.py.
    try:
        f = obj.fileno
    except AttributeError:
        if not isinstance(obj, int):
            raise TypeError("Expected int or long, got %s" % type(obj))
        return obj
    else:
        rv = f()
        if not isinstance(rv, int):
            raise TypeError("Expected int or long, got %s" % type(rv))
        return rv


def select(read_list, write_list, error_list, timeout=None):
    # error checking like this is required by the stdlib unit tests
    if timeout is not None:
        try:
            timeout = float(timeout)
        except ValueError:
            raise TypeError("Expected number for timeout")
    hub = get_hub()
    timers = []
    current = eventlet.getcurrent()
    if hub.greenlet is current:
        raise RuntimeError('do not call blocking functions from the mainloop')
    ds = {}
    for r in read_list:
        ds[get_fileno(r)] = {'read': r}
    for w in write_list:
        ds.setdefault(get_fileno(w), {})['write'] = w
    for e in error_list:
        ds.setdefault(get_fileno(e), {})['error'] = e

    listeners = []

    def on_read(d):
        original = ds[get_fileno(d)]['read']
        current.switch(([original], [], []))

    def on_write(d):
        original = ds[get_fileno(d)]['write']
        current.switch(([], [original], []))

    def on_timeout2():
        current.switch(([], [], []))

    def on_timeout():
        # ensure that BaseHub.run() has a chance to call self.wait()
        # at least once before timing out.  otherwise the following code
        # can time out erroneously.
        #
        # s1, s2 = socket.socketpair()
        # print(select.select([], [s1], [], 0))
        timers.append(hub.schedule_call_global(0, on_timeout2))

    if timeout is not None:
        timers.append(hub.schedule_call_global(timeout, on_timeout))
    try:
        for k, v in ds.items():
            if v.get('read'):
                listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None))
            if v.get('write'):
                listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, lambda: None))
        try:
            return hub.switch()
        finally:
            for l in listeners:
                hub.remove(l)
    finally:
        for t in timers:
            t.cancel()
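
# Editor's sketch (not part of the original module, POSIX-only): same call
# signature as select.select(), but the wait is served by the eventlet hub.
def _demo_select():  # hypothetical helper, for illustration
    from eventlet.green import socket
    s1, s2 = socket.socketpair()
    r, w, x = select([], [s2], [], 1.0)  # a fresh socketpair end is writable
    assert w == [s2]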
34
venv/lib/python3.12/site-packages/eventlet/green/selectors.py
Normal file
@ -0,0 +1,34 @@
import sys

from eventlet import patcher
from eventlet.green import select

__patched__ = [
    'DefaultSelector',
    'SelectSelector',
]

# We only have green select so the options are:
# * leave it be and have selectors that block
# * try to pretend the "bad" selectors don't exist
# * replace all with SelectSelector for the price of possibly different
#   performance characteristic and missing fileno() method (if someone
#   uses it it'll result in a crash, we may want to implement it in the future)
#
# This module used to follow the third approach but just removing the offending
# selectors is a less error-prone and less confusing approach.
__deleted__ = [
    'PollSelector',
    'EpollSelector',
    'DevpollSelector',
    'KqueueSelector',
]

patcher.inject('selectors', globals(), ('select', select))

del patcher

if sys.platform != 'win32':
    SelectSelector._select = staticmethod(select.select)

DefaultSelector = SelectSelector
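
# Editor's sketch (not part of the original module, POSIX-only): DefaultSelector
# is always the (green) SelectSelector here, but the stdlib selectors API is
# unchanged.
def _demo_selector():  # hypothetical helper, for illustration
    from eventlet.green import socket
    sel = DefaultSelector()
    s1, s2 = socket.socketpair()
    sel.register(s2, EVENT_WRITE)  # EVENT_WRITE injected from stdlib selectors
    for key, events in sel.select(timeout=1.0):
        assert key.fileobj is s2 and events == EVENT_WRITE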
63
venv/lib/python3.12/site-packages/eventlet/green/socket.py
Normal file
@ -0,0 +1,63 @@
import os
import sys

__import__('eventlet.green._socket_nodns')
__socket = sys.modules['eventlet.green._socket_nodns']

__all__ = __socket.__all__
__patched__ = __socket.__patched__ + [
    'create_connection',
    'getaddrinfo',
    'gethostbyname',
    'gethostbyname_ex',
    'getnameinfo',
]

from eventlet.patcher import slurp_properties
slurp_properties(__socket, globals(), srckeys=dir(__socket))


if os.environ.get("EVENTLET_NO_GREENDNS", '').lower() != 'yes':
    from eventlet.support import greendns
    gethostbyname = greendns.gethostbyname
    getaddrinfo = greendns.getaddrinfo
    gethostbyname_ex = greendns.gethostbyname_ex
    getnameinfo = greendns.getnameinfo
    del greendns


def create_connection(address,
                      timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.
    """

    err = "getaddrinfo returns an empty list"
    host, port = address
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except error as e:
            err = e
            if sock is not None:
                sock.close()

    if not isinstance(err, error):
        err = error(err)
    raise err
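
# Editor's sketch (not part of the original module): create_connection resolves
# through greendns (unless EVENTLET_NO_GREENDNS=yes) and connects without
# blocking other greenthreads. Host and request line are illustrative.
def _demo_connect(host="example.com"):  # hypothetical helper, for illustration
    conn = create_connection((host, 80), timeout=5)
    conn.sendall(b"HEAD / HTTP/1.0\r\nHost: " + host.encode() + b"\r\n\r\n")
    reply = conn.recv(128)
    conn.close()
    return reply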
487
venv/lib/python3.12/site-packages/eventlet/green/ssl.py
Normal file
@ -0,0 +1,487 @@
__ssl = __import__('ssl')

from eventlet.patcher import slurp_properties
slurp_properties(__ssl, globals(), srckeys=dir(__ssl))

import sys
from eventlet import greenio, hubs
from eventlet.greenio import (
    GreenSocket, CONNECT_ERR, CONNECT_SUCCESS,
)
from eventlet.hubs import trampoline, IOClosed
from eventlet.support import get_errno, PY33
from contextlib import contextmanager

orig_socket = __import__('socket')
socket = orig_socket.socket
timeout_exc = orig_socket.timeout

__patched__ = [
    'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple',
    'create_default_context', '_create_default_https_context']

_original_sslsocket = __ssl.SSLSocket
_original_sslcontext = __ssl.SSLContext
_is_py_3_7 = sys.version_info[:2] == (3, 7)
_original_wrap_socket = __ssl.SSLContext.wrap_socket


@contextmanager
def _original_ssl_context(*args, **kwargs):
    tmp_sslcontext = _original_wrap_socket.__globals__.get('SSLContext', None)
    tmp_sslsocket = _original_sslsocket._create.__globals__.get('SSLSocket', None)
    _original_sslsocket._create.__globals__['SSLSocket'] = _original_sslsocket
    _original_wrap_socket.__globals__['SSLContext'] = _original_sslcontext
    try:
        yield
    finally:
        _original_wrap_socket.__globals__['SSLContext'] = tmp_sslcontext
        _original_sslsocket._create.__globals__['SSLSocket'] = tmp_sslsocket


class GreenSSLSocket(_original_sslsocket):
    """ This is a green version of the SSLSocket class from the ssl module added
    in 2.6.  For documentation on it, please see the Python standard
    documentation.

    Python nonblocking ssl objects don't give errors when the other end
    of the socket is closed (they do notice when the other end is shutdown,
    though).  Any write/read operations will simply hang if the socket is
    closed from the other end.  There is no obvious fix for this problem;
    it appears to be a limitation of Python's ssl object implementation.
    A workaround is to set a reasonable timeout on the socket using
    settimeout(), and to close/reopen the connection when a timeout
    occurs at an unexpected juncture in the code.
    """
    def __new__(cls, sock=None, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_TLS, ca_certs=None,
                do_handshake_on_connect=True, *args, **kw):
        if not isinstance(sock, GreenSocket):
            sock = GreenSocket(sock)
        with _original_ssl_context():
            context = kw.get('_context')
            if context:
                ret = _original_sslsocket._create(
                    sock=sock.fd,
                    server_side=server_side,
                    do_handshake_on_connect=False,
                    suppress_ragged_eofs=kw.get('suppress_ragged_eofs', True),
                    server_hostname=kw.get('server_hostname'),
                    context=context,
                    session=kw.get('session'),
                )
            else:
                ret = cls._wrap_socket(
                    sock=sock.fd,
                    keyfile=keyfile,
                    certfile=certfile,
                    server_side=server_side,
                    cert_reqs=cert_reqs,
                    ssl_version=ssl_version,
                    ca_certs=ca_certs,
                    do_handshake_on_connect=False,
                    ciphers=kw.get('ciphers'),
                )
        ret.keyfile = keyfile
        ret.certfile = certfile
        ret.cert_reqs = cert_reqs
        ret.ssl_version = ssl_version
        ret.ca_certs = ca_certs
        ret.__class__ = GreenSSLSocket
        return ret

    @staticmethod
    def _wrap_socket(sock, keyfile, certfile, server_side, cert_reqs,
                     ssl_version, ca_certs, do_handshake_on_connect, ciphers):
        context = _original_sslcontext(protocol=ssl_version)
        context.options |= cert_reqs
        if certfile or keyfile:
            context.load_cert_chain(
                certfile=certfile,
                keyfile=keyfile,
            )
        if ca_certs:
            context.load_verify_locations(ca_certs)
        if ciphers:
            context.set_ciphers(ciphers)
        return context.wrap_socket(
            sock=sock,
            server_side=server_side,
            do_handshake_on_connect=do_handshake_on_connect,
        )

    # we are inheriting from SSLSocket because its constructor calls
    # do_handshake whose behavior we wish to override
    def __init__(self, sock, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_TLS, ca_certs=None,
                 do_handshake_on_connect=True, *args, **kw):
        if not isinstance(sock, GreenSocket):
            sock = GreenSocket(sock)
        self.act_non_blocking = sock.act_non_blocking

        # the superclass initializer trashes the methods so we remove
        # the local-object versions of them and let the actual class
        # methods shine through
        # Note: This is for Python 2
        try:
            for fn in orig_socket._delegate_methods:
                delattr(self, fn)
        except AttributeError:
            pass

        # Python 3 SSLSocket construction process overwrites the timeout so restore it
        self._timeout = sock.gettimeout()

        # it also sets timeout to None internally apparently (tested with 3.4.2)
        _original_sslsocket.settimeout(self, 0.0)
        assert _original_sslsocket.gettimeout(self) == 0.0

        # see note above about handshaking
        self.do_handshake_on_connect = do_handshake_on_connect
        if do_handshake_on_connect and self._connected:
            self.do_handshake()

    def settimeout(self, timeout):
        self._timeout = timeout

    def gettimeout(self):
        return self._timeout

    def setblocking(self, flag):
        if flag:
            self.act_non_blocking = False
            self._timeout = None
        else:
            self.act_non_blocking = True
            self._timeout = 0.0

    def _call_trampolining(self, func, *a, **kw):
        if self.act_non_blocking:
            return func(*a, **kw)
        else:
            while True:
                try:
                    return func(*a, **kw)
                except SSLError as exc:
                    if get_errno(exc) == SSL_ERROR_WANT_READ:
                        trampoline(self,
                                   read=True,
                                   timeout=self.gettimeout(),
                                   timeout_exc=timeout_exc('timed out'))
                    elif get_errno(exc) == SSL_ERROR_WANT_WRITE:
                        trampoline(self,
                                   write=True,
                                   timeout=self.gettimeout(),
                                   timeout_exc=timeout_exc('timed out'))
                    elif _is_py_3_7 and "unexpected eof" in exc.args[1]:
                        # For reasons I don't understand on 3.7 we get [ssl:
                        # KRB5_S_TKT_NYV] unexpected eof while reading]
                        # errors...
                        raise IOClosed
                    else:
                        raise

    def write(self, data):
        """Write DATA to the underlying SSL channel.  Returns
        number of bytes of DATA actually transmitted."""
        return self._call_trampolining(
            super().write, data)

    def read(self, len=1024, buffer=None):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        try:
            return self._call_trampolining(
                super().read, len, buffer)
        except IOClosed:
            if buffer is None:
                return b''
            else:
                return 0

    def send(self, data, flags=0):
        if self._sslobj:
            return self._call_trampolining(
                super().send, data, flags)
        else:
            trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
            return socket.send(self, data, flags)

    def sendto(self, data, addr, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        else:
            trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
            return socket.sendto(self, data, addr, flags)

    def sendall(self, data, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            amount = len(data)
            count = 0
            data_to_send = data
            while (count < amount):
                v = self.send(data_to_send)
                count += v
                if v == 0:
                    trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
                else:
                    data_to_send = data[count:]
            return amount
        else:
            while True:
                try:
                    return socket.sendall(self, data, flags)
                except orig_socket.error as e:
                    if self.act_non_blocking:
                        raise
                    erno = get_errno(e)
                    if erno in greenio.SOCKET_BLOCKING:
                        trampoline(self, write=True,
                                   timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
                    elif erno in greenio.SOCKET_CLOSED:
                        return ''
                    raise

    def recv(self, buflen=1024, flags=0):
        return self._base_recv(buflen, flags, into=False)

    def recv_into(self, buffer, nbytes=None, flags=0):
        # Copied verbatim from CPython
        if buffer and nbytes is None:
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        # end of CPython code

        return self._base_recv(nbytes, flags, into=True, buffer_=buffer)

    def _base_recv(self, nbytes, flags, into, buffer_=None):
        if into:
            plain_socket_function = socket.recv_into
        else:
            plain_socket_function = socket.recv

        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to %s() on %s" %
                    (plain_socket_function.__name__, self.__class__))
            if into:
                read = self.read(nbytes, buffer_)
            else:
                read = self.read(nbytes)
            return read
        else:
            while True:
                try:
                    args = [self, nbytes, flags]
                    if into:
                        args.insert(1, buffer_)
                    return plain_socket_function(*args)
                except orig_socket.error as e:
                    if self.act_non_blocking:
                        raise
                    erno = get_errno(e)
                    if erno in greenio.SOCKET_BLOCKING:
                        try:
                            trampoline(
                                self, read=True,
                                timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
                        except IOClosed:
                            return b''
                    elif erno in greenio.SOCKET_CLOSED:
                        return b''
                    raise

    def recvfrom(self, addr, buflen=1024, flags=0):
        if not self.act_non_blocking:
            trampoline(self, read=True, timeout=self.gettimeout(),
                       timeout_exc=timeout_exc('timed out'))
        return super().recvfrom(addr, buflen, flags)

    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        if not self.act_non_blocking:
            trampoline(self, read=True, timeout=self.gettimeout(),
                       timeout_exc=timeout_exc('timed out'))
        return super().recvfrom_into(buffer, nbytes, flags)

    def unwrap(self):
        return GreenSocket(self._call_trampolining(
            super().unwrap))

    def do_handshake(self):
        """Perform a TLS/SSL handshake."""
        return self._call_trampolining(
            super().do_handshake)

    def _socket_connect(self, addr):
        real_connect = socket.connect
        if self.act_non_blocking:
            return real_connect(self, addr)
        else:
            clock = hubs.get_hub().clock
            # *NOTE: gross, copied code from greenio because it's not factored
            # well enough to reuse
            if self.gettimeout() is None:
                while True:
                    try:
                        return real_connect(self, addr)
                    except orig_socket.error as exc:
                        if get_errno(exc) in CONNECT_ERR:
                            trampoline(self, write=True)
                        elif get_errno(exc) in CONNECT_SUCCESS:
                            return
                        else:
                            raise
            else:
                end = clock() + self.gettimeout()
                while True:
                    try:
                        real_connect(self, addr)
                    except orig_socket.error as exc:
                        if get_errno(exc) in CONNECT_ERR:
                            trampoline(
                                self, write=True,
                                timeout=end - clock(), timeout_exc=timeout_exc('timed out'))
                        elif get_errno(exc) in CONNECT_SUCCESS:
                            return
                        else:
                            raise
                    if clock() >= end:
                        raise timeout_exc('timed out')

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        # *NOTE: grrrrr copied this code from ssl.py because of the reference
        # to socket.connect which we don't want to call directly
        if self._sslobj:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        self._socket_connect(addr)
        server_side = False
        try:
            sslwrap = _ssl.sslwrap
        except AttributeError:
            # sslwrap was removed in 3.x and later in 2.7.9
            context = self.context if PY33 else self._context
            sslobj = context._wrap_socket(self, server_side, server_hostname=self.server_hostname)
        else:
            sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile,
                             self.cert_reqs, self.ssl_version,
                             self.ca_certs, *self.ciphers)

        try:
            # This is added in Python 3.5, http://bugs.python.org/issue21965
            SSLObject
        except NameError:
            self._sslobj = sslobj
        else:
            self._sslobj = sslobj

        if self.do_handshake_on_connect:
            self.do_handshake()

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        # RDW grr duplication of code from greenio
        if self.act_non_blocking:
            newsock, addr = socket.accept(self)
        else:
            while True:
                try:
                    newsock, addr = socket.accept(self)
                    break
                except orig_socket.error as e:
                    if get_errno(e) not in greenio.SOCKET_BLOCKING:
                        raise
                    trampoline(self, read=True, timeout=self.gettimeout(),
                               timeout_exc=timeout_exc('timed out'))

        new_ssl = type(self)(
            newsock,
            server_side=True,
            do_handshake_on_connect=False,
            suppress_ragged_eofs=self.suppress_ragged_eofs,
            _context=self._context,
        )
        return (new_ssl, addr)

    def dup(self):
        raise NotImplementedError("Can't dup an ssl object")


SSLSocket = GreenSSLSocket


def wrap_socket(sock, *a, **kw):
    return GreenSSLSocket(sock, *a, **kw)


class GreenSSLContext(_original_sslcontext):
    __slots__ = ()

    def wrap_socket(self, sock, *a, **kw):
        return GreenSSLSocket(sock, *a, _context=self, **kw)

    # https://github.com/eventlet/eventlet/issues/371
    # Thanks to the Gevent developers for sharing a patch for this problem.
    if hasattr(_original_sslcontext.options, 'setter'):
        # In 3.6, these became properties. They want to access the
        # property __set__ method in the superclass, and they do so by using
        # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
        # patch, which causes infinite recursion.
        # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
        @_original_sslcontext.options.setter
        def options(self, value):
            super(_original_sslcontext, _original_sslcontext).options.__set__(self, value)

        @_original_sslcontext.verify_flags.setter
        def verify_flags(self, value):
            super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value)

        @_original_sslcontext.verify_mode.setter
        def verify_mode(self, value):
            super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value)

        if hasattr(_original_sslcontext, "maximum_version"):
            @_original_sslcontext.maximum_version.setter
            def maximum_version(self, value):
                super(_original_sslcontext, _original_sslcontext).maximum_version.__set__(self, value)

        if hasattr(_original_sslcontext, "minimum_version"):
            @_original_sslcontext.minimum_version.setter
            def minimum_version(self, value):
                super(_original_sslcontext, _original_sslcontext).minimum_version.__set__(self, value)


SSLContext = GreenSSLContext


# TODO: ssl.create_default_context() was added in 2.7.9.
# Not clear we're still trying to support Python versions even older than that.
if hasattr(__ssl, 'create_default_context'):
    _original_create_default_context = __ssl.create_default_context

    def green_create_default_context(*a, **kw):
        # We can't just monkey-patch the green version of `wrap_socket`
        # on to SSLContext instances, but SSLContext.create_default_context
        # does a bunch of work. Rather than re-implementing it all, just
        # switch out the __class__ to get our `wrap_socket` implementation
        context = _original_create_default_context(*a, **kw)
        context.__class__ = GreenSSLContext
        return context

    create_default_context = green_create_default_context
    _create_default_https_context = green_create_default_context
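
# Editor's sketch (not part of the original module): contexts built here hand
# back GreenSSLSocket objects, so TLS handshakes and reads trampoline instead
# of blocking. Host and request line are illustrative.
def _demo_tls(host="example.com"):  # hypothetical helper, for illustration
    from eventlet.green import socket as green_socket
    ctx = create_default_context()  # a GreenSSLContext
    raw = green_socket.create_connection((host, 443), timeout=5)
    tls = ctx.wrap_socket(raw, server_hostname=host)  # a GreenSSLSocket
    tls.sendall(b"HEAD / HTTP/1.0\r\nHost: " + host.encode() + b"\r\n\r\n")
    reply = tls.recv(128)
    tls.close()
    return reply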
137
venv/lib/python3.12/site-packages/eventlet/green/subprocess.py
Normal file
@ -0,0 +1,137 @@
import errno
import sys
from types import FunctionType

import eventlet
from eventlet import greenio
from eventlet import patcher
from eventlet.green import select, threading, time


__patched__ = ['call', 'check_call', 'Popen']
to_patch = [('select', select), ('threading', threading), ('time', time)]

from eventlet.green import selectors
to_patch.append(('selectors', selectors))

patcher.inject('subprocess', globals(), *to_patch)
subprocess_orig = patcher.original("subprocess")
subprocess_imported = sys.modules.get('subprocess', subprocess_orig)
mswindows = sys.platform == "win32"


if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
    # Backported from Python 3.3.
    # https://bitbucket.org/eventlet/eventlet/issue/89
    class TimeoutExpired(Exception):
        """This exception is raised when the timeout expires while waiting for
        a child process.
        """

        def __init__(self, cmd, timeout, output=None):
            self.cmd = cmd
            self.timeout = timeout
            self.output = output

        def __str__(self):
            return ("Command '%s' timed out after %s seconds" %
                    (self.cmd, self.timeout))
else:
    TimeoutExpired = subprocess_imported.TimeoutExpired


# This is the meat of this module, the green version of Popen.
class Popen(subprocess_orig.Popen):
    """eventlet-friendly version of subprocess.Popen"""
    # We do not believe that Windows pipes support non-blocking I/O. At least,
    # the Python file objects stored on our base-class object have no
    # setblocking() method, and the Python fcntl module doesn't exist on
    # Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of
    # this __init__() override is to wrap the pipes for eventlet-friendly
    # non-blocking I/O, don't even bother overriding it on Windows.
    if not mswindows:
        def __init__(self, args, bufsize=0, *argss, **kwds):
            self.args = args
            # Forward the call to base-class constructor
            subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
            # Now wrap the pipes, if any. This logic is loosely borrowed from
            # eventlet.processes.Process.run() method.
            for attr in "stdin", "stdout", "stderr":
                pipe = getattr(self, attr)
                if pipe is not None and type(pipe) != greenio.GreenPipe:
                    # https://github.com/eventlet/eventlet/issues/243
                    # AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
                    mode = getattr(pipe, 'mode', '')
                    if not mode:
                        if pipe.readable():
                            mode += 'r'
                        if pipe.writable():
                            mode += 'w'
                        # ValueError: can't have unbuffered text I/O
                        if bufsize == 0:
                            bufsize = -1
                    wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
                    setattr(self, attr, wrapped_pipe)
        __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__

    def wait(self, timeout=None, check_interval=0.01):
        # Instead of a blocking OS call, this version of wait() uses logic
        # borrowed from the eventlet 0.2 processes.Process.wait() method.
        if timeout is not None:
            endtime = time.time() + timeout
        try:
            while True:
                status = self.poll()
                if status is not None:
                    return status
                if timeout is not None and time.time() > endtime:
                    raise TimeoutExpired(self.args, timeout)
                eventlet.sleep(check_interval)
        except OSError as e:
            if e.errno == errno.ECHILD:
                # no child process, this happens if the child process
                # already died and has been cleaned up
                return -1
            else:
                raise
    wait.__doc__ = subprocess_orig.Popen.wait.__doc__

    if not mswindows:
        # don't want to rewrite the original _communicate() method, we
        # just want a version that uses eventlet.green.select.select()
        # instead of select.select().
        _communicate = FunctionType(
            subprocess_orig.Popen._communicate.__code__,
            globals())
        try:
            _communicate_with_select = FunctionType(
                subprocess_orig.Popen._communicate_with_select.__code__,
                globals())
            _communicate_with_poll = FunctionType(
                subprocess_orig.Popen._communicate_with_poll.__code__,
                globals())
        except AttributeError:
            pass


# Borrow subprocess.call() and check_call(), but patch them so they reference
# OUR Popen class rather than subprocess.Popen.
def patched_function(function):
    new_function = FunctionType(function.__code__, globals())
    new_function.__kwdefaults__ = function.__kwdefaults__
    new_function.__defaults__ = function.__defaults__
    return new_function


call = patched_function(subprocess_orig.call)
check_call = patched_function(subprocess_orig.check_call)
# check_output is Python 2.7+
if hasattr(subprocess_orig, 'check_output'):
    __patched__.append('check_output')
    check_output = patched_function(subprocess_orig.check_output)
del patched_function

# Keep exceptions identity.
# https://github.com/eventlet/eventlet/issues/413
CalledProcessError = subprocess_imported.CalledProcessError
del subprocess_imported
|
||||
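A usage sketch for the green Popen above (not part of the file). It assumes a POSIX `sleep` binary; the point is that wait() polls with eventlet.sleep(), so other greenthreads keep running while we wait:

import eventlet
from eventlet.green import subprocess

ticks = []
ticker = eventlet.spawn(lambda: [ticks.append(eventlet.sleep(0.05)) for _ in range(10)])

p = subprocess.Popen(['sleep', '1'])
try:
    p.wait(timeout=0.2)            # green wait(): raises TimeoutExpired below
except subprocess.TimeoutExpired:
    pass
p.wait()                           # now wait for the child to really finish
ticker.wait()
print(len(ticks))                  # 10: the ticker ran while we waited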
176
venv/lib/python3.12/site-packages/eventlet/green/thread.py
Normal file
@ -0,0 +1,176 @@
"""Implements the standard thread module, using greenthreads."""
import _thread as __thread
from eventlet.support import greenlets as greenlet
from eventlet import greenthread
from eventlet.timeout import with_timeout
from eventlet.lock import Lock
import sys


__patched__ = ['Lock', 'LockType', '_ThreadHandle', '_count',
               '_get_main_thread_ident', '_local', '_make_thread_handle',
               'allocate', 'allocate_lock', 'exit', 'get_ident',
               'interrupt_main', 'stack_size', 'start_joinable_thread',
               'start_new', 'start_new_thread']

error = __thread.error
LockType = Lock
__threadcount = 0

if hasattr(__thread, "_is_main_interpreter"):
    _is_main_interpreter = __thread._is_main_interpreter


def _set_sentinel():
    # TODO this is dummy code; reimplementing this may be needed:
    # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203
    return allocate_lock()


TIMEOUT_MAX = __thread.TIMEOUT_MAX


def _count():
    return __threadcount


def get_ident(gr=None):
    if gr is None:
        return id(greenlet.getcurrent())
    else:
        return id(gr)


def __thread_body(func, args, kwargs):
    global __threadcount
    __threadcount += 1
    try:
        func(*args, **kwargs)
    finally:
        __threadcount -= 1


class _ThreadHandle:
    def __init__(self, greenthread=None):
        self._greenthread = greenthread
        self._done = False

    def _set_done(self):
        self._done = True

    def is_done(self):
        return self._done

    @property
    def ident(self):
        return get_ident(self._greenthread)

    def join(self, timeout=None):
        if not hasattr(self._greenthread, "wait"):
            return
        if timeout is not None:
            return with_timeout(timeout, self._greenthread.wait)
        return self._greenthread.wait()


def _make_thread_handle(ident):
    greenthread = greenlet.getcurrent()
    assert ident == get_ident(greenthread)
    return _ThreadHandle(greenthread=greenthread)


def __spawn_green(function, args=(), kwargs=None, joinable=False):
    if ((3, 4) <= sys.version_info < (3, 13)
            and getattr(function, '__module__', '') == 'threading'
            and hasattr(function, '__self__')):
        # In Python 3.4-3.12, threading.Thread uses an internal lock that is
        # automatically released when the python thread state is deleted.
        # With monkey patching, eventlet uses green threads without python
        # thread state, so the lock is not automatically released.
        #
        # Wrap _bootstrap_inner() to explicitly release the thread state lock
        # when the thread completes.
        thread = function.__self__
        bootstrap_inner = thread._bootstrap_inner

        def wrap_bootstrap_inner():
            try:
                bootstrap_inner()
            finally:
                # The lock can be cleared (ex: by a fork())
                if getattr(thread, "_tstate_lock", None) is not None:
                    thread._tstate_lock.release()

        thread._bootstrap_inner = wrap_bootstrap_inner

    kwargs = kwargs or {}
    spawn_func = greenthread.spawn if joinable else greenthread.spawn_n
    return spawn_func(__thread_body, function, args, kwargs)


def start_joinable_thread(function, handle=None, daemon=True):
    g = __spawn_green(function, joinable=True)
    if handle is None:
        handle = _ThreadHandle(greenthread=g)
    else:
        handle._greenthread = g
    return handle


def start_new_thread(function, args=(), kwargs=None):
    g = __spawn_green(function, args=args, kwargs=kwargs)
    return get_ident(g)


start_new = start_new_thread


def _get_main_thread_ident():
    greenthread = greenlet.getcurrent()
    while greenthread.parent is not None:
        greenthread = greenthread.parent
    return get_ident(greenthread)


def allocate_lock(*a):
    return LockType(1)


allocate = allocate_lock


def exit():
    raise greenlet.GreenletExit


exit_thread = __thread.exit_thread


def interrupt_main():
    curr = greenlet.getcurrent()
    if curr.parent and not curr.parent.dead:
        curr.parent.throw(KeyboardInterrupt())
    else:
        raise KeyboardInterrupt()


if hasattr(__thread, 'stack_size'):
    __original_stack_size__ = __thread.stack_size

    def stack_size(size=None):
        if size is None:
            return __original_stack_size__()
        if size > __original_stack_size__():
            return __original_stack_size__(size)
        else:
            pass
            # not going to decrease stack_size, because otherwise other greenlets in
            # this thread will suffer

from eventlet.corolocal import local as _local

if hasattr(__thread, 'daemon_threads_allowed'):
    daemon_threads_allowed = __thread.daemon_threads_allowed

if hasattr(__thread, '_shutdown'):
    _shutdown = __thread._shutdown
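A small sketch of the green thread primitives above (not part of the file): idents are greenlet ids, and allocate_lock() hands back the green Lock:

import eventlet
from eventlet.green import thread

idents = []

def record():
    idents.append(thread.get_ident())

handle = thread.start_joinable_thread(record)
handle.join()
print(idents[0], handle.ident)     # the worker's ident is id(its greenlet)

lock = thread.allocate_lock()      # green LockType, safe across greenthreads
with lock:
    print(thread._count())         # 0: the worker already finished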
132
venv/lib/python3.12/site-packages/eventlet/green/threading.py
Normal file
@ -0,0 +1,132 @@
"""Implements the standard threading module, using greenthreads."""
import eventlet
from eventlet.green import thread
from eventlet.green import time
from eventlet.support import greenlets as greenlet

__patched__ = ['Lock', '_after_fork', '_allocate_lock', '_get_main_thread_ident',
               '_make_thread_handle', '_shutdown', '_sleep',
               '_start_joinable_thread', '_start_new_thread', '_ThreadHandle',
               'currentThread', 'current_thread', 'local', 'stack_size']

__patched__ += ['get_ident', '_set_sentinel']

__orig_threading = eventlet.patcher.original('threading')
__threadlocal = __orig_threading.local()
__patched_enumerate = None


eventlet.patcher.inject(
    'threading',
    globals(),
    ('_thread', thread),
    ('time', time))


_count = 1


class _GreenThread:
    """Wrapper for GreenThread objects to provide Thread-like attributes
    and methods"""

    def __init__(self, g):
        global _count
        self._g = g
        self._name = 'GreenThread-%d' % _count
        _count += 1

    def __repr__(self):
        return '<_GreenThread(%s, %r)>' % (self._name, self._g)

    def join(self, timeout=None):
        return self._g.wait()

    def getName(self):
        return self._name
    get_name = getName

    def setName(self, name):
        self._name = str(name)
    set_name = setName

    name = property(getName, setName)

    ident = property(lambda self: id(self._g))

    def isAlive(self):
        return True
    is_alive = isAlive

    daemon = property(lambda self: True)

    def isDaemon(self):
        return self.daemon
    is_daemon = isDaemon


__threading = None


def _fixup_thread(t):
    # Some third-party packages (lockfile) will try to patch the
    # threading.Thread class with a get_name attribute if it doesn't
    # exist. Since we might return Thread objects from the original
    # threading package that won't get patched, let's make sure each
    # individual object gets patched too, once our patched threading.Thread
    # class has been patched. This is why monkey patching can be bad...
    global __threading
    if not __threading:
        __threading = __import__('threading')

    if (hasattr(__threading.Thread, 'get_name') and
            not hasattr(t, 'get_name')):
        t.get_name = t.getName
    return t


def current_thread():
    global __patched_enumerate
    g = greenlet.getcurrent()
    if not g:
        # Not currently in a greenthread, fall back to standard function
        return _fixup_thread(__orig_threading.current_thread())

    try:
        active = __threadlocal.active
    except AttributeError:
        active = __threadlocal.active = {}

    g_id = id(g)
    t = active.get(g_id)
    if t is not None:
        return t

    # FIXME: move import from function body to top
    # (jaketesler@github) Furthermore, I was unable to have current_thread() return correct results from
    # threading.enumerate() unless the enumerate() function was a) imported at runtime using the gross __import__() call
    # and b) was hot-patched using patch_function().
    # https://github.com/eventlet/eventlet/issues/172#issuecomment-379421165
    if __patched_enumerate is None:
        __patched_enumerate = eventlet.patcher.patch_function(__import__('threading').enumerate)
    found = [th for th in __patched_enumerate() if th.ident == g_id]
    if found:
        return found[0]

    # Add green thread to active if we can clean it up on exit
    def cleanup(g):
        del active[g_id]
    try:
        g.link(cleanup)
    except AttributeError:
        # Not a GreenThread type, so there's no way to hook into
        # the green thread exiting. Fall back to the standard
        # function then.
        t = _fixup_thread(__orig_threading.current_thread())
    else:
        t = active[g_id] = _GreenThread(g)

    return t


currentThread = current_thread
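A sketch of current_thread() above (not part of the file): inside a greenthread it returns a _GreenThread wrapper keyed by the greenlet's id:

import eventlet
from eventlet.green import threading

def who():
    t = threading.current_thread()
    return t.name, t.ident

gt = eventlet.spawn(who)
name, ident = gt.wait()
print(name)                        # e.g. GreenThread-1
print(ident == id(gt))             # True: the wrapper's ident is id(greenlet)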
6
venv/lib/python3.12/site-packages/eventlet/green/time.py
Normal file
@ -0,0 +1,6 @@
__time = __import__('time')
from eventlet.patcher import slurp_properties
__patched__ = ['sleep']
slurp_properties(__time, globals(), ignore=__patched__, srckeys=dir(__time))
from eventlet.greenthread import sleep
sleep  # silence pyflakes
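A sketch of the patched sleep (not part of the file): green sleeps overlap instead of serializing:

import time as _real_time
import eventlet
from eventlet.green import time

start = _real_time.monotonic()
gts = [eventlet.spawn(time.sleep, 0.2) for _ in range(10)]
for gt in gts:
    gt.wait()
# ~0.2s wall time, not ~2s: all ten sleeps waited on the hub together.
print(round(_real_time.monotonic() - start, 1))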
@ -0,0 +1,5 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import time
from eventlet.green import httplib
from eventlet.green import ftplib
@ -0,0 +1,4 @@
from eventlet import patcher
from eventlet.green.urllib import response
patcher.inject('urllib.error', globals(), ('urllib.response', response))
del patcher
@ -0,0 +1,3 @@
from eventlet import patcher
patcher.inject('urllib.parse', globals())
del patcher
@ -0,0 +1,50 @@
from eventlet import patcher
from eventlet.green import ftplib, http, os, socket, time
from eventlet.green.http import client as http_client
from eventlet.green.urllib import error, parse, response

# TODO should we also have green email version?
# import email


to_patch = [
    # This (http module) is needed here, otherwise test__greenness hangs
    # forever on Python 3 because parts of non-green http (including
    # http.client) leak into our patched urllib.request. There may be a nicer
    # way to handle this (I didn't dig too deep) but this does the job. Jakub

    ('http', http),
    ('http.client', http_client),
    ('os', os),
    ('socket', socket),
    ('time', time),
    ('urllib.error', error),
    ('urllib.parse', parse),
    ('urllib.response', response),
]

try:
    from eventlet.green import ssl
except ImportError:
    pass
else:
    to_patch.append(('ssl', ssl))

patcher.inject('urllib.request', globals(), *to_patch)
del to_patch

to_patch_in_functions = [('ftplib', ftplib)]
del ftplib

FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions)
URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions)

ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions)

ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions)
ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions)

del error
del parse
del response
del to_patch_in_functions
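A sketch of the green urllib.request (not part of the file; needs network access, and the URL is illustrative):

import eventlet
from eventlet.green.urllib import request

def status_of(url):
    # urlopen() rides on the green http.client injected above
    with request.urlopen(url) as resp:
        return resp.status

pool = eventlet.GreenPool(3)
for status in pool.imap(status_of, ['http://example.com'] * 3):
    print(status)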
@ -0,0 +1,3 @@
from eventlet import patcher
patcher.inject('urllib.response', globals())
del patcher
20
venv/lib/python3.12/site-packages/eventlet/green/urllib2.py
Normal file
@ -0,0 +1,20 @@
from eventlet import patcher
from eventlet.green import ftplib
from eventlet.green import httplib
from eventlet.green import socket
from eventlet.green import ssl
from eventlet.green import time
from eventlet.green import urllib

patcher.inject(
    'urllib2',
    globals(),
    ('httplib', httplib),
    ('socket', socket),
    ('ssl', ssl),
    ('time', time),
    ('urllib', urllib))

FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, ('ftplib', ftplib))

del patcher
465
venv/lib/python3.12/site-packages/eventlet/green/zmq.py
Normal file
@ -0,0 +1,465 @@
"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context`
found in :mod:`pyzmq <zmq>` to be non blocking.
"""
__zmq__ = __import__('zmq')
import eventlet.hubs
from eventlet.patcher import slurp_properties
from eventlet.support import greenlets as greenlet

__patched__ = ['Context', 'Socket']
slurp_properties(__zmq__, globals(), ignore=__patched__)

from collections import deque

try:
    # alias XREQ/XREP to DEALER/ROUTER if available
    if not hasattr(__zmq__, 'XREQ'):
        XREQ = DEALER
    if not hasattr(__zmq__, 'XREP'):
        XREP = ROUTER
except NameError:
    pass


class LockReleaseError(Exception):
    pass


class _QueueLock:
    """A Lock that can be acquired by at most one thread. Any other
    thread calling acquire will be blocked in a queue. When release
    is called, the threads are awoken in the order they blocked,
    one at a time. This lock can be acquired recursively by the same
    thread."""

    def __init__(self):
        self._waiters = deque()
        self._count = 0
        self._holder = None
        self._hub = eventlet.hubs.get_hub()

    def __nonzero__(self):
        return bool(self._count)

    __bool__ = __nonzero__

    def __enter__(self):
        self.acquire()

    def __exit__(self, type, value, traceback):
        self.release()

    def acquire(self):
        current = greenlet.getcurrent()
        if (self._waiters or self._count > 0) and self._holder is not current:
            # block until lock is free
            self._waiters.append(current)
            self._hub.switch()
            w = self._waiters.popleft()

            assert w is current, 'Waiting threads woken out of order'
            assert self._count == 0, 'After waking a thread, the lock must be unacquired'

        self._holder = current
        self._count += 1

    def release(self):
        if self._count <= 0:
            raise LockReleaseError("Cannot release unacquired lock")

        self._count -= 1
        if self._count == 0:
            self._holder = None
            if self._waiters:
                # wake next
                self._hub.schedule_call_global(0, self._waiters[0].switch)


class _BlockedThread:
    """Is either empty, or represents a single blocked thread that
    blocked itself by calling the block() method. The thread can be
    awoken by calling wake(). Wake() can be called multiple times and
    all but the first call will have no effect."""

    def __init__(self):
        self._blocked_thread = None
        self._wakeupper = None
        self._hub = eventlet.hubs.get_hub()

    def __nonzero__(self):
        return self._blocked_thread is not None

    __bool__ = __nonzero__

    def block(self, deadline=None):
        if self._blocked_thread is not None:
            raise Exception("Cannot block more than one thread on one BlockedThread")
        self._blocked_thread = greenlet.getcurrent()

        if deadline is not None:
            self._hub.schedule_call_local(deadline - self._hub.clock(), self.wake)

        try:
            self._hub.switch()
        finally:
            self._blocked_thread = None
            # cleanup the wakeup task
            if self._wakeupper is not None:
                # Important to cancel the wakeup task so it doesn't
                # spuriously wake this greenthread later on.
                self._wakeupper.cancel()
                self._wakeupper = None

    def wake(self):
        """Schedules the blocked thread to be awoken and returns
        True. If wake has already been called or if there is no
        blocked thread, then this call has no effect and returns
        False."""
        if self._blocked_thread is not None and self._wakeupper is None:
            self._wakeupper = self._hub.schedule_call_global(0, self._blocked_thread.switch)
            return True
        return False


class Context(__zmq__.Context):
    """Subclass of :class:`zmq.Context`
    """

    def socket(self, socket_type):
        """Overridden method to ensure that the green version of socket is used

        Behaves the same as :meth:`zmq.Context.socket`, but ensures
        that a :class:`Socket` with all of its send and recv methods set to be
        non-blocking is returned
        """
        if self.closed:
            raise ZMQError(ENOTSUP)
        return Socket(self, socket_type)


def _wraps(source_fn):
    """A decorator that copies the __name__ and __doc__ from the given
    function
    """
    def wrapper(dest_fn):
        dest_fn.__name__ = source_fn.__name__
        dest_fn.__doc__ = source_fn.__doc__
        return dest_fn
    return wrapper


# Implementation notes: Each socket in 0mq contains a pipe that the
# background IO threads use to communicate with the socket. These
# events are important because they tell the socket when it is able to
# send and when it has messages waiting to be received. The read end
# of the events pipe is the same FD that getsockopt(zmq.FD) returns.
#
# Events are read from the socket's event pipe only on the thread that
# the 0mq context is associated with, which is the native thread the
# greenthreads are running on, and the only operations that cause the
# events to be read and processed are send(), recv() and
# getsockopt(zmq.EVENTS). This means that after doing any of these
# three operations, the ability of the socket to send or receive a
# message without blocking may have changed, but after the events are
# read the FD is no longer readable so the hub may not signal our
# listener.
#
# If we understand that after calling send() a message might be ready
# to be received and that after calling recv() a message might be able
# to be sent, what should we do next? There are two approaches:
#
#  1. Always wake the other thread if there is one waiting. This
#  wakeup may be spurious because the socket might not actually be
#  ready for a send() or recv().  However, if a thread is in a
#  tight-loop successfully calling send() or recv() then the wakeups
#  are naturally batched and there's very little cost added to each
#  send/recv call.
#
# or
#
#  2. Call getsockopt(zmq.EVENTS) and explicitly check if the other
#  thread should be woken up. This avoids spurious wake-ups but may
#  add overhead because getsockopt will cause all events to be
#  processed, whereas send and recv throttle processing
#  events. Admittedly, all of the events will need to be processed
#  eventually, but it is likely faster to batch the processing.
#
# Which approach is better? I have no idea.
#
# TODO:
# - Support MessageTrackers and make MessageTracker.wait green

_Socket = __zmq__.Socket
_Socket_recv = _Socket.recv
_Socket_send = _Socket.send
_Socket_send_multipart = _Socket.send_multipart
_Socket_recv_multipart = _Socket.recv_multipart
_Socket_send_string = _Socket.send_string
_Socket_recv_string = _Socket.recv_string
_Socket_send_pyobj = _Socket.send_pyobj
_Socket_recv_pyobj = _Socket.recv_pyobj
_Socket_send_json = _Socket.send_json
_Socket_recv_json = _Socket.recv_json
_Socket_getsockopt = _Socket.getsockopt


class Socket(_Socket):
    """Green version of :class:``zmq.core.socket.Socket``.

    The following three methods are always overridden:
        * send
        * recv
        * getsockopt
    These overrides ensure that the ``zmq.NOBLOCK`` flag is set and that
    sending or receiving is deferred to the hub (using
    :func:``eventlet.hubs.trampoline``) if a ``zmq.EAGAIN`` (retry) error
    is raised.

    For some socket types, the following methods are also overridden:
        * send_multipart
        * recv_multipart
    """

    def __init__(self, context, socket_type):
        super().__init__(context, socket_type)

        self.__dict__['_eventlet_send_event'] = _BlockedThread()
        self.__dict__['_eventlet_recv_event'] = _BlockedThread()
        self.__dict__['_eventlet_send_lock'] = _QueueLock()
        self.__dict__['_eventlet_recv_lock'] = _QueueLock()

        def event(fd):
            # Some events arrived at the zmq socket. This may mean
            # there's a message that can be read or there's space for
            # a message to be written.
            send_wake = self._eventlet_send_event.wake()
            recv_wake = self._eventlet_recv_event.wake()
            if not send_wake and not recv_wake:
                # if no waiting send or recv thread was woken up, then
                # force the zmq socket's events to be processed to
                # avoid repeated wakeups
                _Socket_getsockopt(self, EVENTS)

        hub = eventlet.hubs.get_hub()
        self.__dict__['_eventlet_listener'] = hub.add(hub.READ,
                                                      self.getsockopt(FD),
                                                      event,
                                                      lambda _: None,
                                                      lambda: None)
        self.__dict__['_eventlet_clock'] = hub.clock

    @_wraps(_Socket.close)
    def close(self, linger=None):
        super().close(linger)
        if self._eventlet_listener is not None:
            eventlet.hubs.get_hub().remove(self._eventlet_listener)
            self.__dict__['_eventlet_listener'] = None
            # wake any blocked threads
            self._eventlet_send_event.wake()
            self._eventlet_recv_event.wake()

    @_wraps(_Socket.getsockopt)
    def getsockopt(self, option):
        result = _Socket_getsockopt(self, option)
        if option == EVENTS:
            # Getting the events causes the zmq socket to process
            # events which may mean a msg can be sent or received. If
            # there is a greenthread blocked and waiting for events,
            # it will miss the edge-triggered read event, so wake it
            # up.
            if (result & POLLOUT):
                self._eventlet_send_event.wake()
            if (result & POLLIN):
                self._eventlet_recv_event.wake()
        return result

    @_wraps(_Socket.send)
    def send(self, msg, flags=0, copy=True, track=False):
        """A send method that's safe to use when multiple greenthreads
        are calling send, send_multipart, recv and recv_multipart on
        the same socket.
        """
        if flags & NOBLOCK:
            result = _Socket_send(self, msg, flags, copy, track)
            # Instead of calling both wake methods, could call
            # self.getsockopt(EVENTS) which would trigger wakeups if
            # needed.
            self._eventlet_send_event.wake()
            self._eventlet_recv_event.wake()
            return result

        # TODO: pyzmq will copy the message buffer and create Message
        # objects under some circumstances. We could do that work here
        # once to avoid doing it every time the send is retried.
        flags |= NOBLOCK
        with self._eventlet_send_lock:
            while True:
                try:
                    return _Socket_send(self, msg, flags, copy, track)
                except ZMQError as e:
                    if e.errno == EAGAIN:
                        self._eventlet_send_event.block()
                    else:
                        raise
                finally:
                    # The call to send processes 0mq events and may
                    # make the socket ready to recv. Wake the next
                    # receiver. (Could check EVENTS for POLLIN here)
                    self._eventlet_recv_event.wake()

    @_wraps(_Socket.send_multipart)
    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        """A send_multipart method that's safe to use when multiple
        greenthreads are calling send, send_multipart, recv and
        recv_multipart on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_send_multipart(self, msg_parts, flags, copy, track)

        # acquire lock here so the subsequent calls to send for the
        # message parts after the first don't block
        with self._eventlet_send_lock:
            return _Socket_send_multipart(self, msg_parts, flags, copy, track)

    @_wraps(_Socket.send_string)
    def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
        """A send_string method that's safe to use when multiple
        greenthreads are calling send, send_string, recv and
        recv_string on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_send_string(self, u, flags, copy, encoding)

        # acquire lock here so the subsequent calls to send for the
        # message parts after the first don't block
        with self._eventlet_send_lock:
            return _Socket_send_string(self, u, flags, copy, encoding)

    @_wraps(_Socket.send_pyobj)
    def send_pyobj(self, obj, flags=0, protocol=2):
        """A send_pyobj method that's safe to use when multiple
        greenthreads are calling send, send_pyobj, recv and
        recv_pyobj on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_send_pyobj(self, obj, flags, protocol)

        # acquire lock here so the subsequent calls to send for the
        # message parts after the first don't block
        with self._eventlet_send_lock:
            return _Socket_send_pyobj(self, obj, flags, protocol)

    @_wraps(_Socket.send_json)
    def send_json(self, obj, flags=0, **kwargs):
        """A send_json method that's safe to use when multiple
        greenthreads are calling send, send_json, recv and
        recv_json on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_send_json(self, obj, flags, **kwargs)

        # acquire lock here so the subsequent calls to send for the
        # message parts after the first don't block
        with self._eventlet_send_lock:
            return _Socket_send_json(self, obj, flags, **kwargs)

    @_wraps(_Socket.recv)
    def recv(self, flags=0, copy=True, track=False):
        """A recv method that's safe to use when multiple greenthreads
        are calling send, send_multipart, recv and recv_multipart on
        the same socket.
        """
        if flags & NOBLOCK:
            msg = _Socket_recv(self, flags, copy, track)
            # Instead of calling both wake methods, could call
            # self.getsockopt(EVENTS) which would trigger wakeups if
            # needed.
            self._eventlet_send_event.wake()
            self._eventlet_recv_event.wake()
            return msg

        deadline = None
        if hasattr(__zmq__, 'RCVTIMEO'):
            sock_timeout = self.getsockopt(__zmq__.RCVTIMEO)
            if sock_timeout == -1:
                pass
            elif sock_timeout > 0:
                deadline = self._eventlet_clock() + sock_timeout / 1000.0
            else:
                raise ValueError(sock_timeout)

        flags |= NOBLOCK
        with self._eventlet_recv_lock:
            while True:
                try:
                    return _Socket_recv(self, flags, copy, track)
                except ZMQError as e:
                    if e.errno == EAGAIN:
                        # zmq in its wisdom decided to reuse EAGAIN for timeouts
                        if deadline is not None and self._eventlet_clock() > deadline:
                            e.is_timeout = True
                            raise

                        self._eventlet_recv_event.block(deadline=deadline)
                    else:
                        raise
                finally:
                    # The call to recv processes 0mq events and may
                    # make the socket ready to send. Wake the next
                    # receiver. (Could check EVENTS for POLLOUT here)
                    self._eventlet_send_event.wake()

    @_wraps(_Socket.recv_multipart)
    def recv_multipart(self, flags=0, copy=True, track=False):
        """A recv_multipart method that's safe to use when multiple
        greenthreads are calling send, send_multipart, recv and
        recv_multipart on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_recv_multipart(self, flags, copy, track)

        # acquire lock here so the subsequent calls to recv for the
        # message parts after the first don't block
        with self._eventlet_recv_lock:
            return _Socket_recv_multipart(self, flags, copy, track)

    @_wraps(_Socket.recv_string)
    def recv_string(self, flags=0, encoding='utf-8'):
        """A recv_string method that's safe to use when multiple
        greenthreads are calling send, send_string, recv and
        recv_string on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_recv_string(self, flags, encoding)

        # acquire lock here so the subsequent calls to recv for the
        # message parts after the first don't block
        with self._eventlet_recv_lock:
            return _Socket_recv_string(self, flags, encoding)

    @_wraps(_Socket.recv_json)
    def recv_json(self, flags=0, **kwargs):
        """A recv_json method that's safe to use when multiple
        greenthreads are calling send, send_json, recv and
        recv_json on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_recv_json(self, flags, **kwargs)

        # acquire lock here so the subsequent calls to recv for the
        # message parts after the first don't block
        with self._eventlet_recv_lock:
            return _Socket_recv_json(self, flags, **kwargs)

    @_wraps(_Socket.recv_pyobj)
    def recv_pyobj(self, flags=0):
        """A recv_pyobj method that's safe to use when multiple
        greenthreads are calling send, send_pyobj, recv and
        recv_pyobj on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_recv_pyobj(self, flags)

        # acquire lock here so the subsequent calls to recv for the
        # message parts after the first don't block
        with self._eventlet_recv_lock:
            return _Socket_recv_pyobj(self, flags)
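A usage sketch for the green Socket/Context above (not part of the file; assumes pyzmq is installed, and the inproc address is illustrative):

import eventlet
from eventlet.green import zmq

ctx = zmq.Context()

def rep_server():
    sock = ctx.socket(zmq.REP)
    sock.bind('inproc://demo')
    sock.send(sock.recv().upper())   # green recv/send defer to the hub

server = eventlet.spawn(rep_server)
eventlet.sleep(0)                    # let the server bind first

req = ctx.socket(zmq.REQ)
req.connect('inproc://demo')
req.send(b'ping')
print(req.recv())                    # b'PING'
server.wait()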
@ -0,0 +1,3 @@
from eventlet.greenio.base import *  # noqa

from eventlet.greenio.py3 import *  # noqa
492
venv/lib/python3.12/site-packages/eventlet/greenio/base.py
Normal file
@ -0,0 +1,492 @@
import errno
import os
import socket
import sys
import time
import warnings

import eventlet
from eventlet.hubs import trampoline, notify_opened, IOClosed
from eventlet.support import get_errno

__all__ = [
    'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
    'SOCKET_BLOCKING', 'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS',
    'shutdown_safe', 'SSL',
    'socket_timeout',
]

BUFFER_SIZE = 4096
CONNECT_ERR = {errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK}
CONNECT_SUCCESS = {0, errno.EISCONN}
if sys.platform[:3] == "win":
    CONNECT_ERR.add(errno.WSAEINVAL)  # Bug 67

_original_socket = eventlet.patcher.original('socket').socket


if sys.version_info >= (3, 10):
    socket_timeout = socket.timeout  # Really, TimeoutError
else:
    socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout)


def socket_connect(descriptor, address):
    """
    Attempts to connect to the address, returns the descriptor if it succeeds,
    returns None if it needs to trampoline, and raises any exceptions.
    """
    err = descriptor.connect_ex(address)
    if err in CONNECT_ERR:
        return None
    if err not in CONNECT_SUCCESS:
        raise OSError(err, errno.errorcode[err])
    return descriptor


def socket_checkerr(descriptor):
    err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
    if err not in CONNECT_SUCCESS:
        raise OSError(err, errno.errorcode[err])


def socket_accept(descriptor):
    """
    Attempts to accept() on the descriptor, returns a client,address tuple
    if it succeeds; returns None if it needs to trampoline, and raises
    any exceptions.
    """
    try:
        return descriptor.accept()
    except OSError as e:
        if get_errno(e) == errno.EWOULDBLOCK:
            return None
        raise


if sys.platform[:3] == "win":
    # winsock sometimes throws ENOTCONN
    SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK}
    SOCKET_CLOSED = {errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN}
else:
    # oddly, on linux/darwin, an unconnected socket is expected to block,
    # so we treat ENOTCONN the same as EWOULDBLOCK
    SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN}
    SOCKET_CLOSED = {errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE}


def set_nonblocking(fd):
    """
    Sets the descriptor to be nonblocking. Works on many file-like
    objects as well as sockets. Only sockets can be nonblocking on
    Windows, however.
    """
    try:
        setblocking = fd.setblocking
    except AttributeError:
        # fd has no setblocking() method. It could be that this version of
        # Python predates socket.setblocking(). In that case, we can still set
        # the flag "by hand" on the underlying OS fileno using the fcntl
        # module.
        try:
            import fcntl
        except ImportError:
            # Whoops, Windows has no fcntl module. This might not be a socket
            # at all, but rather a file-like object with no setblocking()
            # method. In particular, on Windows, pipes don't support
            # non-blocking I/O and therefore don't have that method. Which
            # means fcntl wouldn't help even if we could load it.
            raise NotImplementedError("set_nonblocking() on a file object "
                                      "with no setblocking() method "
                                      "(Windows pipes don't support non-blocking I/O)")
        # We managed to import fcntl.
        fileno = fd.fileno()
        orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
        new_flags = orig_flags | os.O_NONBLOCK
        if new_flags != orig_flags:
            fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags)
    else:
        # socket supports setblocking()
        setblocking(0)


try:
    from socket import _GLOBAL_DEFAULT_TIMEOUT
except ImportError:
    _GLOBAL_DEFAULT_TIMEOUT = object()


class GreenSocket:
    """
    Green version of the socket.socket class, intended to be 100%
    API-compatible.

    It also recognizes the keyword parameter, 'set_nonblocking=True'.
    Pass False to indicate that socket is already in non-blocking mode
    to save syscalls.
    """

    # This placeholder is to prevent __getattr__ from creating an infinite call loop
    fd = None

    def __init__(self, family=socket.AF_INET, *args, **kwargs):
        should_set_nonblocking = kwargs.pop('set_nonblocking', True)
        if isinstance(family, int):
            fd = _original_socket(family, *args, **kwargs)
            # Notify the hub that this is a newly-opened socket.
            notify_opened(fd.fileno())
        else:
            fd = family

        # import timeout from other socket, if it was there
        try:
            self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
        except AttributeError:
            self._timeout = socket.getdefaulttimeout()

        # Filter on fd.fileno() != -1 so that we won't call set non-blocking on
        # a closed socket
        if should_set_nonblocking and fd.fileno() != -1:
            set_nonblocking(fd)
        self.fd = fd
        # when client calls setblocking(0) or settimeout(0) the socket must
        # act non-blocking
        self.act_non_blocking = False

        # Copy some attributes from underlying real socket.
        # This is the easiest way that I found to fix
        # https://bitbucket.org/eventlet/eventlet/issue/136
        # Only `getsockopt` is required to fix that issue, others
        # are just premature optimization to save __getattr__ call.
        self.bind = fd.bind
        self.close = fd.close
        self.fileno = fd.fileno
        self.getsockname = fd.getsockname
        self.getsockopt = fd.getsockopt
        self.listen = fd.listen
        self.setsockopt = fd.setsockopt
        self.shutdown = fd.shutdown
        self._closed = False

    @property
    def _sock(self):
        return self

    def _get_io_refs(self):
        return self.fd._io_refs

    def _set_io_refs(self, value):
        self.fd._io_refs = value

    _io_refs = property(_get_io_refs, _set_io_refs)

    # Forward unknown attributes to fd, cache the value for future use.
    # I do not see any simple attribute which could be changed
    # so caching everything in self is fine.
    # If we find such attributes - only attributes having __get__ might be cached.
    # For now - I do not want to complicate it.
    def __getattr__(self, name):
        if self.fd is None:
            raise AttributeError(name)
        attr = getattr(self.fd, name)
        setattr(self, name, attr)
        return attr

    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
        """ We need to trampoline via the event hub.
        We catch any signal back from the hub indicating that the operation we
        were waiting on was associated with a filehandle that's since been
        invalidated.
        """
        if self._closed:
            # If we did any logging, alerting to a second trampoline attempt on a closed
            # socket here would be useful.
            raise IOClosed()
        try:
            return trampoline(fd, read=read, write=write, timeout=timeout,
                              timeout_exc=timeout_exc,
                              mark_as_closed=self._mark_as_closed)
        except IOClosed:
            # This socket's been obsoleted. De-fang it.
            self._mark_as_closed()
            raise

    def accept(self):
        if self.act_non_blocking:
            res = self.fd.accept()
            notify_opened(res[0].fileno())
            return res
        fd = self.fd
        _timeout_exc = socket_timeout('timed out')
        while True:
            res = socket_accept(fd)
            if res is not None:
                client, addr = res
                notify_opened(client.fileno())
                set_nonblocking(client)
                return type(self)(client), addr
            self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)

    def _mark_as_closed(self):
        """ Mark this socket as being closed """
        self._closed = True

    def __del__(self):
        # This is in case self.close is not assigned yet (currently the constructor does it)
        close = getattr(self, 'close', None)
        if close is not None:
            close()

    def connect(self, address):
        if self.act_non_blocking:
            return self.fd.connect(address)
        fd = self.fd
        _timeout_exc = socket_timeout('timed out')
        if self.gettimeout() is None:
            while not socket_connect(fd, address):
                try:
                    self._trampoline(fd, write=True)
                except IOClosed:
                    raise OSError(errno.EBADFD)
                socket_checkerr(fd)
        else:
            end = time.time() + self.gettimeout()
            while True:
                if socket_connect(fd, address):
                    return
                if time.time() >= end:
                    raise _timeout_exc
                timeout = end - time.time()
                try:
                    self._trampoline(fd, write=True, timeout=timeout, timeout_exc=_timeout_exc)
                except IOClosed:
                    # ... we need some workable errno here.
                    raise OSError(errno.EBADFD)
                socket_checkerr(fd)

    def connect_ex(self, address):
        if self.act_non_blocking:
            return self.fd.connect_ex(address)
        fd = self.fd
        if self.gettimeout() is None:
            while not socket_connect(fd, address):
                try:
                    self._trampoline(fd, write=True)
                    socket_checkerr(fd)
                except OSError as ex:
                    return get_errno(ex)
                except IOClosed:
                    return errno.EBADFD
            return 0
        else:
            end = time.time() + self.gettimeout()
            timeout_exc = socket.timeout(errno.EAGAIN)
            while True:
                try:
                    if socket_connect(fd, address):
                        return 0
                    if time.time() >= end:
                        raise timeout_exc
                    self._trampoline(fd, write=True, timeout=end - time.time(),
                                     timeout_exc=timeout_exc)
                    socket_checkerr(fd)
                except OSError as ex:
                    return get_errno(ex)
                except IOClosed:
                    return errno.EBADFD
                return 0

    def dup(self, *args, **kw):
        sock = self.fd.dup(*args, **kw)
        newsock = type(self)(sock, set_nonblocking=False)
        newsock.settimeout(self.gettimeout())
        return newsock

    def makefile(self, *args, **kwargs):
        return _original_socket.makefile(self, *args, **kwargs)

    def makeGreenFile(self, *args, **kw):
        warnings.warn("makeGreenFile has been deprecated, please use "
                      "makefile instead", DeprecationWarning, stacklevel=2)
        return self.makefile(*args, **kw)

    def _read_trampoline(self):
        self._trampoline(
            self.fd,
            read=True,
            timeout=self.gettimeout(),
            timeout_exc=socket_timeout('timed out'))

    def _recv_loop(self, recv_meth, empty_val, *args):
        if self.act_non_blocking:
            return recv_meth(*args)

        while True:
            try:
                # recv: bufsize=0?
                # recv_into: buffer is empty?
                # This is needed because behind the scenes we use sockets in
                # nonblocking mode and builtin recv* methods. Attempting to read
                # 0 bytes from a nonblocking socket using a builtin recv* method
                # does not raise a timeout exception. Since we're simulating
                # a blocking socket here we need to produce a timeout exception
                # if needed, hence the call to trampoline.
                if not args[0]:
                    self._read_trampoline()
                return recv_meth(*args)
            except OSError as e:
                if get_errno(e) in SOCKET_BLOCKING:
                    pass
                elif get_errno(e) in SOCKET_CLOSED:
                    return empty_val
                else:
                    raise

            try:
                self._read_trampoline()
            except IOClosed as e:
                # Perhaps we should return '' instead?
                raise EOFError()

    def recv(self, bufsize, flags=0):
        return self._recv_loop(self.fd.recv, b'', bufsize, flags)

    def recvfrom(self, bufsize, flags=0):
        return self._recv_loop(self.fd.recvfrom, b'', bufsize, flags)

    def recv_into(self, buffer, nbytes=0, flags=0):
        return self._recv_loop(self.fd.recv_into, 0, buffer, nbytes, flags)

    def recvfrom_into(self, buffer, nbytes=0, flags=0):
        return self._recv_loop(self.fd.recvfrom_into, 0, buffer, nbytes, flags)

    def _send_loop(self, send_method, data, *args):
        if self.act_non_blocking:
            return send_method(data, *args)

        _timeout_exc = socket_timeout('timed out')
        while True:
            try:
                return send_method(data, *args)
            except OSError as e:
                eno = get_errno(e)
                if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
                    raise

            try:
                self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
                                 timeout_exc=_timeout_exc)
            except IOClosed:
                raise OSError(errno.ECONNRESET, 'Connection closed by another thread')

    def send(self, data, flags=0):
        return self._send_loop(self.fd.send, data, flags)

    def sendto(self, data, *args):
        return self._send_loop(self.fd.sendto, data, *args)

    def sendall(self, data, flags=0):
        tail = self.send(data, flags)
        len_data = len(data)
        while tail < len_data:
            tail += self.send(data[tail:], flags)

    def setblocking(self, flag):
        if flag:
            self.act_non_blocking = False
            self._timeout = None
        else:
            self.act_non_blocking = True
            self._timeout = 0.0

    def settimeout(self, howlong):
        if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
            self.setblocking(True)
            return
        try:
            f = howlong.__float__
        except AttributeError:
            raise TypeError('a float is required')
        howlong = f()
        if howlong < 0.0:
            raise ValueError('Timeout value out of range')
        if howlong == 0.0:
            self.act_non_blocking = True
            self._timeout = 0.0
        else:
            self.act_non_blocking = False
            self._timeout = howlong

    def gettimeout(self):
        return self._timeout

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    if "__pypy__" in sys.builtin_module_names:
        def _reuse(self):
            getattr(self.fd, '_sock', self.fd)._reuse()

        def _drop(self):
            getattr(self.fd, '_sock', self.fd)._drop()


def _operation_on_closed_file(*args, **kwargs):
    raise ValueError("I/O operation on closed file")


greenpipe_doc = """
    GreenPipe is a cooperative replacement for file class.
    It will cooperate on pipes. It will block on regular file.
    Differences from file class:
    - mode is a r/w property; it should be r/o
    - encoding property not implemented
    - write/writelines will not raise TypeError exception when non-string data is written;
      it will write str(data) instead
    - Universal new lines are not supported and newlines property not implemented
    - file argument can be descriptor, file name or file object.
    """

# import SSL module here so we can refer to greenio.SSL.exceptionclass
try:
    from OpenSSL import SSL
except ImportError:
    # pyOpenSSL not installed, define exceptions anyway for convenience
    class SSL:
        class WantWriteError(Exception):
            pass

        class WantReadError(Exception):
            pass

        class ZeroReturnError(Exception):
            pass

        class SysCallError(Exception):
            pass


def shutdown_safe(sock):
    """Shuts down the socket. This is a convenience method for
    code that wants to gracefully handle regular sockets, SSL.Connection
    sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.7 interchangeably.
    Both types of ssl socket require a shutdown() before close,
    but they have different arity on their shutdown method.

    Regular sockets don't need a shutdown before close, but it doesn't hurt.
    """
    try:
        try:
            # socket, ssl.SSLSocket
            return sock.shutdown(socket.SHUT_RDWR)
        except TypeError:
            # SSL.Connection
            return sock.shutdown()
    except OSError as e:
        # we don't care if the socket is already closed;
        # this will often be the case in an http server context
        if get_errno(e) not in (errno.ENOTCONN, errno.EBADF, errno.ENOTSOCK):
            raise
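An echo round-trip showing the GreenSocket above cooperating on one OS thread (not part of the file):

import eventlet
from eventlet.green import socket

listener = eventlet.listen(('127.0.0.1', 0))   # returns a GreenSocket
addr = listener.getsockname()

def echo_once():
    client, _ = listener.accept()              # trampolines, doesn't block the hub
    client.sendall(client.recv(1024))
    client.close()

gt = eventlet.spawn(echo_once)
conn = socket.socket()
conn.connect(addr)
conn.sendall(b'hello')
print(conn.recv(1024))                         # b'hello'
conn.close()
gt.wait()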
219
venv/lib/python3.12/site-packages/eventlet/greenio/py3.py
Normal file
@ -0,0 +1,219 @@
|
||||
import _pyio as _original_pyio
|
||||
import errno
|
||||
import os as _original_os
|
||||
import socket as _original_socket
|
||||
from io import (
|
||||
BufferedRandom as _OriginalBufferedRandom,
|
||||
BufferedReader as _OriginalBufferedReader,
|
||||
BufferedWriter as _OriginalBufferedWriter,
|
||||
DEFAULT_BUFFER_SIZE,
|
||||
TextIOWrapper as _OriginalTextIOWrapper,
|
||||
IOBase as _OriginalIOBase,
|
||||
)
|
||||
from types import FunctionType
|
||||
|
||||
from eventlet.greenio.base import (
|
||||
_operation_on_closed_file,
|
||||
greenpipe_doc,
|
||||
set_nonblocking,
|
||||
SOCKET_BLOCKING,
|
||||
)
|
||||
from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline
|
||||
from eventlet.support import get_errno
|
||||
|
||||
__all__ = ['_fileobject', 'GreenPipe']
|
||||
|
||||
# TODO get rid of this, it only seems like the original _fileobject
|
||||
_fileobject = _original_socket.SocketIO
|
||||
|
||||
# Large part of the following code is copied from the original
|
||||
# eventlet.greenio module
|
||||
|
||||
|
||||
class GreenFileIO(_OriginalIOBase):
|
||||
def __init__(self, name, mode='r', closefd=True, opener=None):
|
||||
if isinstance(name, int):
|
||||
fileno = name
|
||||
self._name = "<fd:%d>" % fileno
|
||||
else:
|
||||
assert isinstance(name, str)
|
||||
with open(name, mode) as fd:
|
||||
self._name = fd.name
|
||||
fileno = _original_os.dup(fd.fileno())
|
||||
|
||||
notify_opened(fileno)
|
||||
self._fileno = fileno
|
||||
self._mode = mode
|
||||
self._closed = False
|
||||
set_nonblocking(self)
|
||||
self._seekable = None
|
||||
|
||||
@property
|
||||
def closed(self):
|
        return self._closed

    def seekable(self):
        if self._seekable is None:
            try:
                _original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR)
            except OSError as e:
                if get_errno(e) == errno.ESPIPE:
                    self._seekable = False
                else:
                    raise
            else:
                self._seekable = True

        return self._seekable

    def readable(self):
        return 'r' in self._mode or '+' in self._mode

    def writable(self):
        return 'w' in self._mode or '+' in self._mode or 'a' in self._mode

    def fileno(self):
        return self._fileno

    def read(self, size=-1):
        if size == -1:
            return self.readall()

        while True:
            try:
                return _original_os.read(self._fileno, size)
            except OSError as e:
                if get_errno(e) not in SOCKET_BLOCKING:
                    raise OSError(*e.args)
                self._trampoline(self, read=True)

    def readall(self):
        buf = []
        while True:
            try:
                chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE)
                if chunk == b'':
                    return b''.join(buf)
                buf.append(chunk)
            except OSError as e:
                if get_errno(e) not in SOCKET_BLOCKING:
                    raise OSError(*e.args)
                self._trampoline(self, read=True)

    def readinto(self, b):
        up_to = len(b)
        data = self.read(up_to)
        bytes_read = len(data)
        b[:bytes_read] = data
        return bytes_read

    def isatty(self):
        try:
            return _original_os.isatty(self.fileno())
        except OSError as e:
            raise OSError(*e.args)

    def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
        if self._closed:
            # Don't trampoline if we're already closed.
            raise IOClosed()
        try:
            return trampoline(fd, read=read, write=write, timeout=timeout,
                              timeout_exc=timeout_exc,
                              mark_as_closed=self._mark_as_closed)
        except IOClosed:
            # Our fileno has been obsoleted. Defang ourselves to
            # prevent spurious closes.
            self._mark_as_closed()
            raise

    def _mark_as_closed(self):
        """ Mark this socket as being closed """
        self._closed = True

    def write(self, data):
        view = memoryview(data)
        datalen = len(data)
        offset = 0
        while offset < datalen:
            try:
                written = _original_os.write(self._fileno, view[offset:])
            except OSError as e:
                if get_errno(e) not in SOCKET_BLOCKING:
                    raise OSError(*e.args)
                trampoline(self, write=True)
            else:
                offset += written
        return offset

    def close(self):
        if not self._closed:
            self._closed = True
            _original_os.close(self._fileno)
        notify_close(self._fileno)
        for method in [
                'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
                'readline', 'readlines', 'seek', 'tell', 'truncate',
                'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
            setattr(self, method, _operation_on_closed_file)

    def truncate(self, size=-1):
        if size is None:
            size = -1
        if size == -1:
            size = self.tell()
        try:
            rv = _original_os.ftruncate(self._fileno, size)
        except OSError as e:
            raise OSError(*e.args)
        else:
            self.seek(size)  # move position & clear buffer
            return rv

    def seek(self, offset, whence=_original_os.SEEK_SET):
        try:
            return _original_os.lseek(self._fileno, offset, whence)
        except OSError as e:
            raise OSError(*e.args)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()


_open_environment = dict(globals())
_open_environment.update(dict(
    BufferedRandom=_OriginalBufferedRandom,
    BufferedWriter=_OriginalBufferedWriter,
    BufferedReader=_OriginalBufferedReader,
    TextIOWrapper=_OriginalTextIOWrapper,
    FileIO=GreenFileIO,
    os=_original_os,
))
if hasattr(_original_pyio, 'text_encoding'):
    _open_environment['text_encoding'] = _original_pyio.text_encoding

_pyio_open = getattr(_original_pyio.open, '__wrapped__', _original_pyio.open)
_open = FunctionType(
    _pyio_open.__code__,
    _open_environment,
)


def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None,
              newline=None, closefd=True, opener=None):
    try:
        fileno = name.fileno()
    except AttributeError:
        pass
    else:
        fileno = _original_os.dup(fileno)
        name.close()
        name = fileno

    return _open(name, mode, buffering, encoding, errors, newline, closefd, opener)


GreenPipe.__doc__ = greenpipe_doc
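GreenPipe above is just the pure-Python open() rebuilt so that raw file I/O goes through GreenFileIO, and therefore through the hub. A minimal sketch, assuming this eventlet install, of wiring both ends of an os.pipe() pair so a blocking read yields to other greenthreads instead of stalling the process (names are illustrative):

    import os
    import eventlet
    from eventlet.greenio import GreenPipe

    r, w = os.pipe()
    reader = GreenPipe(r, 'rb', buffering=0)   # raw, unbuffered GreenFileIO
    writer = GreenPipe(w, 'wb', buffering=0)

    def produce():
        writer.write(b'hello')
        writer.close()

    eventlet.spawn(produce)
    print(reader.read(5))   # cooperative read; the producer runs while we wait
    reader.close()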
256
venv/lib/python3.12/site-packages/eventlet/greenpool.py
Normal file
@@ -0,0 +1,256 @@
import traceback

import eventlet
from eventlet import queue
from eventlet.support import greenlets as greenlet

__all__ = ['GreenPool', 'GreenPile']

DEBUG = True


class GreenPool:
    """The GreenPool class is a pool of green threads.
    """

    def __init__(self, size=1000):
        try:
            size = int(size)
        except ValueError as e:
            msg = 'GreenPool() expect size :: int, actual: {} {}'.format(type(size), str(e))
            raise TypeError(msg)
        if size < 0:
            msg = 'GreenPool() expect size >= 0, actual: {}'.format(repr(size))
            raise ValueError(msg)
        self.size = size
        self.coroutines_running = set()
        self.sem = eventlet.Semaphore(size)
        self.no_coros_running = eventlet.Event()

    def resize(self, new_size):
        """ Change the max number of greenthreads doing work at any given time.

        If resize is called when there are more than *new_size* greenthreads
        already working on tasks, they will be allowed to complete but no new
        tasks will be allowed to get launched until enough greenthreads finish
        their tasks to drop the overall quantity below *new_size*.  Until
        then, the return value of free() will be negative.
        """
        size_delta = new_size - self.size
        self.sem.counter += size_delta
        self.size = new_size

    def running(self):
        """ Returns the number of greenthreads that are currently executing
        functions in the GreenPool."""
        return len(self.coroutines_running)

    def free(self):
        """ Returns the number of greenthreads available for use.

        If zero or less, the next call to :meth:`spawn` or :meth:`spawn_n` will
        block the calling greenthread until a slot becomes available."""
        return self.sem.counter

    def spawn(self, function, *args, **kwargs):
        """Run the *function* with its arguments in its own green thread.
        Returns the :class:`GreenThread <eventlet.GreenThread>`
        object that is running the function, which can be used to retrieve the
        results.

        If the pool is currently at capacity, ``spawn`` will block until one of
        the running greenthreads completes its task and frees up a slot.

        This function is reentrant; *function* can call ``spawn`` on the same
        pool without risk of deadlocking the whole thing.
        """
        # if reentering an empty pool, don't try to wait on a coroutine freeing
        # itself -- instead, just execute in the current coroutine
        current = eventlet.getcurrent()
        if self.sem.locked() and current in self.coroutines_running:
            # a bit hacky to use the GT without switching to it
            gt = eventlet.greenthread.GreenThread(current)
            gt.main(function, args, kwargs)
            return gt
        else:
            self.sem.acquire()
            gt = eventlet.spawn(function, *args, **kwargs)
            if not self.coroutines_running:
                self.no_coros_running = eventlet.Event()
            self.coroutines_running.add(gt)
            gt.link(self._spawn_done)
            return gt

    def _spawn_n_impl(self, func, args, kwargs, coro):
        try:
            try:
                func(*args, **kwargs)
            except (KeyboardInterrupt, SystemExit, greenlet.GreenletExit):
                raise
            except:
                if DEBUG:
                    traceback.print_exc()
        finally:
            if coro is None:
                return
            else:
                coro = eventlet.getcurrent()
                self._spawn_done(coro)

    def spawn_n(self, function, *args, **kwargs):
        """Create a greenthread to run the *function*, the same as
        :meth:`spawn`.  The difference is that :meth:`spawn_n` returns
        None; the results of *function* are not retrievable.
        """
        # if reentering an empty pool, don't try to wait on a coroutine freeing
        # itself -- instead, just execute in the current coroutine
        current = eventlet.getcurrent()
        if self.sem.locked() and current in self.coroutines_running:
            self._spawn_n_impl(function, args, kwargs, None)
        else:
            self.sem.acquire()
            g = eventlet.spawn_n(
                self._spawn_n_impl,
                function, args, kwargs, True)
            if not self.coroutines_running:
                self.no_coros_running = eventlet.Event()
            self.coroutines_running.add(g)

    def waitall(self):
        """Waits until all greenthreads in the pool are finished working."""
        assert eventlet.getcurrent() not in self.coroutines_running, \
            "Calling waitall() from within one of the " \
            "GreenPool's greenthreads will never terminate."
        if self.running():
            self.no_coros_running.wait()

    def _spawn_done(self, coro):
        self.sem.release()
        if coro is not None:
            self.coroutines_running.remove(coro)
        # if done processing (no more work is waiting for processing),
        # we can finish off any waitall() calls that might be pending
        if self.sem.balance == self.size:
            self.no_coros_running.send(None)

    def waiting(self):
        """Return the number of greenthreads waiting to spawn.
        """
        if self.sem.balance < 0:
            return -self.sem.balance
        else:
            return 0

    def _do_map(self, func, it, gi):
        for args in it:
            gi.spawn(func, *args)
        gi.done_spawning()

    def starmap(self, function, iterable):
        """This is the same as :func:`itertools.starmap`, except that *func* is
        executed in a separate green thread for each item, with the concurrency
        limited by the pool's size.  In operation, starmap consumes a constant
        amount of memory, proportional to the size of the pool, and is thus
        suited for iterating over extremely long input lists.
        """
        if function is None:
            function = lambda *a: a
        # We use a whole separate greenthread so its spawn() calls can block
        # without blocking OUR caller.  On the other hand, we must assume that
        # our caller will immediately start trying to iterate over whatever we
        # return.  If that were a GreenPile, our caller would always see an
        # empty sequence because the hub hasn't even entered _do_map() yet --
        # _do_map() hasn't had a chance to spawn a single greenthread on this
        # GreenPool!  A GreenMap is safe to use with different producer and
        # consumer greenthreads, because it doesn't raise StopIteration until
        # the producer has explicitly called done_spawning().
        gi = GreenMap(self.size)
        eventlet.spawn_n(self._do_map, function, iterable, gi)
        return gi

    def imap(self, function, *iterables):
        """This is the same as :func:`itertools.imap`, and has the same
        concurrency and memory behavior as :meth:`starmap`.

        It's quite convenient for, e.g., farming out jobs from a file::

            def worker(line):
                return do_something(line)

            pool = GreenPool()
            for result in pool.imap(worker, open("filename", 'r')):
                print(result)
        """
        return self.starmap(function, zip(*iterables))


class GreenPile:
    """GreenPile is an abstraction representing a bunch of I/O-related tasks.

    Construct a GreenPile with an existing GreenPool object.  The GreenPile will
    then use that pool's concurrency as it processes its jobs.  There can be
    many GreenPiles associated with a single GreenPool.

    A GreenPile can also be constructed standalone, not associated with any
    GreenPool.  To do this, construct it with an integer size parameter instead
    of a GreenPool.

    It is not advisable to iterate over a GreenPile in a different greenthread
    than the one which is calling spawn.  The iterator will exit early in that
    situation.
    """

    def __init__(self, size_or_pool=1000):
        if isinstance(size_or_pool, GreenPool):
            self.pool = size_or_pool
        else:
            self.pool = GreenPool(size_or_pool)
        self.waiters = queue.LightQueue()
        self.counter = 0

    def spawn(self, func, *args, **kw):
        """Runs *func* in its own green thread, with the result available by
        iterating over the GreenPile object."""
        self.counter += 1
        try:
            gt = self.pool.spawn(func, *args, **kw)
            self.waiters.put(gt)
        except:
            self.counter -= 1
            raise

    def __iter__(self):
        return self

    def next(self):
        """Wait for the next result, suspending the current greenthread until it
        is available.  Raises StopIteration when there are no more results."""
        if self.counter == 0:
            raise StopIteration()
        return self._next()
    __next__ = next

    def _next(self):
        try:
            return self.waiters.get().wait()
        finally:
            self.counter -= 1


# this is identical to GreenPile but it blocks on spawn if the results
# aren't consumed, and it doesn't generate its own StopIteration exception,
# instead relying on the spawning process to send one in when it's done
class GreenMap(GreenPile):
    def __init__(self, size_or_pool):
        super().__init__(size_or_pool)
        self.waiters = queue.LightQueue(maxsize=self.pool.size)

    def done_spawning(self):
        self.spawn(lambda: StopIteration())

    def next(self):
        val = self._next()
        if isinstance(val, StopIteration):
            raise val
        else:
            return val
    __next__ = next
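A minimal sketch pulling together the pool APIs defined above: spawn() for a single retrievable result, imap() for ordered bulk work bounded by the pool's size, and waitall() to drain the pool:

    import eventlet

    def double(x):
        eventlet.sleep(0)   # cooperative yield
        return x * 2

    pool = eventlet.GreenPool(size=10)

    gt = pool.spawn(double, 21)
    print(gt.wait())                  # -> 42

    for result in pool.imap(double, range(5)):
        print(result)                 # 0, 2, 4, 6, 8, in input order

    pool.waitall()                    # blocks until every greenthread finishes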
346
venv/lib/python3.12/site-packages/eventlet/greenthread.py
Normal file
@@ -0,0 +1,346 @@
from collections import deque
import sys

from greenlet import GreenletExit

from eventlet import event
from eventlet import hubs
from eventlet import support
from eventlet import timeout
from eventlet.hubs import timer
from eventlet.support import greenlets as greenlet
import warnings

__all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n',
           'kill',
           'spawn_after', 'spawn_after_local', 'GreenThread']

getcurrent = greenlet.getcurrent


def sleep(seconds=0):
    """Yield control to another eligible coroutine until at least *seconds* have
    elapsed.

    *seconds* may be specified as an integer, or a float if fractional seconds
    are desired.  Calling :func:`~greenthread.sleep` with *seconds* of 0 is the
    canonical way of expressing a cooperative yield.  For example, if one is
    looping over a large list performing an expensive calculation without
    calling any socket methods, it's a good idea to call ``sleep(0)``
    occasionally; otherwise nothing else will run.
    """
    hub = hubs.get_hub()
    current = getcurrent()
    if hub.greenlet is current:
        raise RuntimeError('do not call blocking functions from the mainloop')
    timer = hub.schedule_call_global(seconds, current.switch)
    try:
        hub.switch()
    finally:
        timer.cancel()


def spawn(func, *args, **kwargs):
    """Create a greenthread to run ``func(*args, **kwargs)``.  Returns a
    :class:`GreenThread` object which you can use to get the results of the
    call.

    Execution control returns immediately to the caller; the created greenthread
    is merely scheduled to be run at the next available opportunity.
    Use :func:`spawn_after` to arrange for greenthreads to be spawned
    after a finite delay.
    """
    hub = hubs.get_hub()
    g = GreenThread(hub.greenlet)
    hub.schedule_call_global(0, g.switch, func, args, kwargs)
    return g


def spawn_n(func, *args, **kwargs):
    """Same as :func:`spawn`, but returns a ``greenlet`` object from
    which it is not possible to retrieve either a return value or
    whether it raised any exceptions.  This is faster than
    :func:`spawn`; it is fastest if there are no keyword arguments.

    If an exception is raised in the function, spawn_n prints a stack
    trace; the print can be disabled by calling
    :func:`eventlet.debug.hub_exceptions` with False.
    """
    return _spawn_n(0, func, args, kwargs)[1]


def spawn_after(seconds, func, *args, **kwargs):
    """Spawns *func* after *seconds* have elapsed.  It runs as scheduled even if
    the current greenthread has completed.

    *seconds* may be specified as an integer, or a float if fractional seconds
    are desired.  The *func* will be called with the given *args* and
    keyword arguments *kwargs*, and will be executed within its own greenthread.

    The return value of :func:`spawn_after` is a :class:`GreenThread` object,
    which can be used to retrieve the results of the call.

    To cancel the spawn and prevent *func* from being called,
    call :meth:`GreenThread.cancel` on the return value of :func:`spawn_after`.
    This will not abort the function if it's already started running, which is
    generally the desired behavior.  If terminating *func* regardless of whether
    it's started or not is the desired behavior, call :meth:`GreenThread.kill`.
    """
    hub = hubs.get_hub()
    g = GreenThread(hub.greenlet)
    hub.schedule_call_global(seconds, g.switch, func, args, kwargs)
    return g


def spawn_after_local(seconds, func, *args, **kwargs):
    """Spawns *func* after *seconds* have elapsed.  The function will NOT be
    called if the current greenthread has exited.

    *seconds* may be specified as an integer, or a float if fractional seconds
    are desired.  The *func* will be called with the given *args* and
    keyword arguments *kwargs*, and will be executed within its own greenthread.

    The return value of :func:`spawn_after` is a :class:`GreenThread` object,
    which can be used to retrieve the results of the call.

    To cancel the spawn and prevent *func* from being called,
    call :meth:`GreenThread.cancel` on the return value.  This will not abort the
    function if it's already started running.  If terminating *func* regardless
    of whether it's started or not is the desired behavior, call
    :meth:`GreenThread.kill`.
    """
    hub = hubs.get_hub()
    g = GreenThread(hub.greenlet)
    hub.schedule_call_local(seconds, g.switch, func, args, kwargs)
    return g


def call_after_global(seconds, func, *args, **kwargs):
    warnings.warn(
        "call_after_global is renamed to spawn_after, which "
        "has the same signature and semantics (plus a bit extra).  Please do a"
        " quick search-and-replace on your codebase, thanks!",
        DeprecationWarning, stacklevel=2)
    return _spawn_n(seconds, func, args, kwargs)[0]


def call_after_local(seconds, function, *args, **kwargs):
    warnings.warn(
        "call_after_local is renamed to spawn_after_local, which "
        "has the same signature and semantics (plus a bit extra).",
        DeprecationWarning, stacklevel=2)
    hub = hubs.get_hub()
    g = greenlet.greenlet(function, parent=hub.greenlet)
    t = hub.schedule_call_local(seconds, g.switch, *args, **kwargs)
    return t


call_after = call_after_local


def exc_after(seconds, *throw_args):
    warnings.warn("Instead of exc_after, which is deprecated, use "
                  "Timeout(seconds, exception)",
                  DeprecationWarning, stacklevel=2)
    if seconds is None:  # dummy argument, do nothing
        return timer.Timer(seconds, lambda: None)
    hub = hubs.get_hub()
    return hub.schedule_call_local(seconds, getcurrent().throw, *throw_args)


# deprecated, remove
TimeoutError, with_timeout = (
    support.wrap_deprecated(old, new)(fun) for old, new, fun in (
        ('greenthread.TimeoutError', 'Timeout', timeout.Timeout),
        ('greenthread.with_timeout', 'with_timeout', timeout.with_timeout),
    ))


def _spawn_n(seconds, func, args, kwargs):
    hub = hubs.get_hub()
    g = greenlet.greenlet(func, parent=hub.greenlet)
    t = hub.schedule_call_global(seconds, g.switch, *args, **kwargs)
    return t, g


class GreenThread(greenlet.greenlet):
    """The GreenThread class is a type of Greenlet which has the additional
    property of being able to retrieve the return value of the main function.
    Do not construct GreenThread objects directly; call :func:`spawn` to get one.
    """

    def __init__(self, parent):
        greenlet.greenlet.__init__(self, self.main, parent)
        self._exit_event = event.Event()
        self._resolving_links = False
        self._exit_funcs = None

    def __await__(self):
        """
        Enable ``GreenThread``s to be ``await``ed in ``async`` functions.
        """
        from eventlet.hubs.asyncio import Hub
        hub = hubs.get_hub()
        if not isinstance(hub, Hub):
            raise RuntimeError(
                "This API only works with eventlet's asyncio hub. "
                + "To use it, set an EVENTLET_HUB=asyncio environment variable."
            )

        future = hub.loop.create_future()

        # When the Future finishes, check if it was due to cancellation:
        def got_future_result(future):
            if future.cancelled() and not self.dead:
                # GreenThread is still running, so kill it:
                self.kill()

        future.add_done_callback(got_future_result)

        # When the GreenThread finishes, set its result on the Future:
        def got_gthread_result(gthread):
            if future.done():
                # Can't set values any more.
                return

            try:
                # Should return immediately:
                result = gthread.wait()
                future.set_result(result)
            except GreenletExit:
                future.cancel()
            except BaseException as e:
                future.set_exception(e)

        self.link(got_gthread_result)

        return future.__await__()

    def wait(self):
        """ Returns the result of the main function of this GreenThread.  If the
        result is a normal return value, :meth:`wait` returns it.  If it raised
        an exception, :meth:`wait` will raise the same exception (though the
        stack trace will unavoidably contain some frames from within the
        greenthread module)."""
        return self._exit_event.wait()

    def link(self, func, *curried_args, **curried_kwargs):
        """ Set up a function to be called with the results of the GreenThread.

        The function must have the following signature::

            def func(gt, [curried args/kwargs]):

        When the GreenThread finishes its run, it calls *func* with itself
        and with the `curried arguments <http://en.wikipedia.org/wiki/Currying>`_ supplied
        at link-time.  If the function wants to retrieve the result of the GreenThread,
        it should call wait() on its first argument.

        Note that *func* is called within the execution context of
        the GreenThread, so it is possible to interfere with other linked
        functions by doing things like switching explicitly to another
        greenthread.
        """
        if self._exit_funcs is None:
            self._exit_funcs = deque()
        self._exit_funcs.append((func, curried_args, curried_kwargs))
        if self._exit_event.ready():
            self._resolve_links()

    def unlink(self, func, *curried_args, **curried_kwargs):
        """ Remove a linked function set by :meth:`link`.

        Returns True if the function was successfully removed, otherwise False.
        """
        if not self._exit_funcs:
            return False
        try:
            self._exit_funcs.remove((func, curried_args, curried_kwargs))
            return True
        except ValueError:
            return False

    def main(self, function, args, kwargs):
        try:
            result = function(*args, **kwargs)
        except:
            self._exit_event.send_exception(*sys.exc_info())
            self._resolve_links()
            raise
        else:
            self._exit_event.send(result)
            self._resolve_links()

    def _resolve_links(self):
        # ca and ckw are the curried function arguments
        if self._resolving_links:
            return
        if not self._exit_funcs:
            return
        self._resolving_links = True
        try:
            while self._exit_funcs:
                f, ca, ckw = self._exit_funcs.popleft()
                f(self, *ca, **ckw)
        finally:
            self._resolving_links = False

    def kill(self, *throw_args):
        """Kills the greenthread using :func:`kill`.  After being killed
        all calls to :meth:`wait` will raise *throw_args* (which default
        to :class:`greenlet.GreenletExit`)."""
        return kill(self, *throw_args)

    def cancel(self, *throw_args):
        """Kills the greenthread using :func:`kill`, but only if it hasn't
        already started running.  After being canceled,
        all calls to :meth:`wait` will raise *throw_args* (which default
        to :class:`greenlet.GreenletExit`)."""
        return cancel(self, *throw_args)


def cancel(g, *throw_args):
    """Like :func:`kill`, but only terminates the greenthread if it hasn't
    already started execution.  If the greenthread has already started
    execution, :func:`cancel` has no effect."""
    if not g:
        kill(g, *throw_args)


def kill(g, *throw_args):
    """Terminates the target greenthread by raising an exception into it.
    Whatever that greenthread might be doing, be it waiting for I/O or another
    primitive, it sees an exception right away.

    By default, this exception is GreenletExit, but a specific exception
    may be specified.  *throw_args* should be the same as the arguments to
    raise; either an exception instance or an exc_info tuple.

    Calling :func:`kill` causes the calling greenthread to cooperatively yield.
    """
    if g.dead:
        return
    hub = hubs.get_hub()
    if not g:
        # greenlet hasn't started yet and therefore throw won't work
        # on its own; semantically we want it to be as though the main
        # method never got called
        def just_raise(*a, **kw):
            if throw_args:
                raise throw_args[1].with_traceback(throw_args[2])
            else:
                raise greenlet.GreenletExit()
        g.run = just_raise
        if isinstance(g, GreenThread):
            # it's a GreenThread object, so we want to call its main
            # method to take advantage of the notification
            try:
                g.main(just_raise, (), {})
            except:
                pass
    current = getcurrent()
    if current is not hub.greenlet:
        # arrange to wake the caller back up immediately
        hub.ensure_greenlet()
        hub.schedule_call_global(0, current.switch)
    g.throw(*throw_args)
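A minimal sketch of the module's public surface as documented above: spawn()/wait() for results, link() for completion callbacks, and spawn_after()/cancel() for delayed work:

    import eventlet
    from eventlet import greenthread

    def work(n):
        greenthread.sleep(0.01)
        return n + 1

    gt = greenthread.spawn(work, 41)
    gt.link(lambda g: print('done:', g.wait()))   # fires when work() finishes
    print(gt.wait())                              # -> 42

    # spawn_after schedules the same call on a delay; cancel() stops it
    # only because it has not started running yet.
    later = greenthread.spawn_after(10, work, 0)
    later.cancel()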
188
venv/lib/python3.12/site-packages/eventlet/hubs/__init__.py
Normal file
@@ -0,0 +1,188 @@
import importlib
import inspect
import os
import warnings

from eventlet import patcher
from eventlet.support import greenlets as greenlet


__all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"]

threading = patcher.original('threading')
_threadlocal = threading.local()


# order is important, get_default_hub returns first available from here
builtin_hub_names = ('epolls', 'kqueue', 'poll', 'selects')
builtin_hub_modules = tuple(importlib.import_module('eventlet.hubs.' + name) for name in builtin_hub_names)


class HubError(Exception):
    pass


def get_default_hub():
    """Select the default hub implementation based on what multiplexing
    libraries are installed.  The order that the hubs are tried is:

    * epoll
    * kqueue
    * poll
    * select

    .. include:: ../../doc/source/common.txt
    .. note :: |internal|
    """
    for mod in builtin_hub_modules:
        if mod.is_available():
            return mod

    raise HubError('no built-in hubs are available: {}'.format(builtin_hub_modules))


def use_hub(mod=None):
    """Use the module *mod*, containing a class called Hub, as the
    event hub.  Usually not required; the default hub is usually fine.

    `mod` can be an actual hub class, a module, a string, or None.

    If `mod` is a class, use it directly.
    If `mod` is a module, use the `module.Hub` class.
    If `mod` is a string containing either '.' or ':',
    `use_hub` follows the 'package.subpackage.module:Class' convention;
    otherwise it imports `eventlet.hubs.mod`.
    If `mod` is None, `use_hub` uses the default hub.

    Only call use_hub during application initialization,
    because it resets the hub's state and any existing
    timers or listeners will never be resumed.

    These two threadlocal attributes are not part of the Eventlet public API:
    - `threadlocal.Hub` (capital H) is the hub constructor, used when no hub is currently active
    - `threadlocal.hub` (lowercase h) is the active hub instance
    """
    if mod is None:
        mod = os.environ.get('EVENTLET_HUB', None)
    if mod is None:
        mod = get_default_hub()
    if hasattr(_threadlocal, 'hub'):
        del _threadlocal.hub

    classname = ''
    if isinstance(mod, str):
        if mod.strip() == "":
            raise RuntimeError("Need to specify a hub")
        if '.' in mod or ':' in mod:
            modulename, _, classname = mod.strip().partition(':')
        else:
            modulename = 'eventlet.hubs.' + mod
        mod = importlib.import_module(modulename)

    if hasattr(mod, 'is_available'):
        if not mod.is_available():
            raise Exception('selected hub is not available on this system mod={}'.format(mod))
    else:
        msg = '''Please provide `is_available()` function in your custom Eventlet hub {mod}.
It must return bool: whether hub supports current platform. See eventlet/hubs/{{epoll,kqueue}} for example.
'''.format(mod=mod)
        warnings.warn(msg, DeprecationWarning, stacklevel=3)

    hubclass = mod
    if not inspect.isclass(mod):
        hubclass = getattr(mod, classname or 'Hub')

    _threadlocal.Hub = hubclass


def get_hub():
    """Get the current event hub singleton object.

    .. note :: |internal|
    """
    try:
        hub = _threadlocal.hub
    except AttributeError:
        try:
            _threadlocal.Hub
        except AttributeError:
            use_hub()
        hub = _threadlocal.hub = _threadlocal.Hub()
    return hub


# Lame middle file import because complex dependencies in import graph
from eventlet import timeout


def trampoline(fd, read=None, write=None, timeout=None,
               timeout_exc=timeout.Timeout,
               mark_as_closed=None):
    """Suspend the current coroutine until the given socket object or file
    descriptor is ready to *read*, ready to *write*, or the specified
    *timeout* elapses, depending on arguments specified.

    To wait for *fd* to be ready to read, pass *read* ``=True``; ready to
    write, pass *write* ``=True``.  To specify a timeout, pass the *timeout*
    argument in seconds.

    If the specified *timeout* elapses before the socket is ready to read or
    write, *timeout_exc* will be raised instead of ``trampoline()``
    returning normally.

    .. note :: |internal|
    """
    t = None
    hub = get_hub()
    current = greenlet.getcurrent()
    if hub.greenlet is current:
        raise RuntimeError('do not call blocking functions from the mainloop')
    if (read and write):
        raise RuntimeError('not allowed to trampoline for reading and writing')
    try:
        fileno = fd.fileno()
    except AttributeError:
        fileno = fd
    if timeout is not None:
        def _timeout(exc):
            # This is only useful to insert debugging
            current.throw(exc)
        t = hub.schedule_call_global(timeout, _timeout, timeout_exc)
    try:
        if read:
            listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed)
        elif write:
            listener = hub.add(hub.WRITE, fileno, current.switch, current.throw, mark_as_closed)
        try:
            return hub.switch()
        finally:
            hub.remove(listener)
    finally:
        if t is not None:
            t.cancel()


def notify_close(fd):
    """
    A particular file descriptor has been explicitly closed.  Register for any
    waiting listeners to be notified on the next run loop.
    """
    hub = get_hub()
    hub.notify_close(fd)


def notify_opened(fd):
    """
    Some file descriptors may be closed 'silently' - that is, by the garbage
    collector, by an external library, etc.  When the OS returns a file descriptor
    from an open call (or something similar), this may be the only indication we
    have that the FD has been closed and then recycled.
    We let the hub know that the old file descriptor is dead; any stuck listeners
    will be disabled and notified in turn.
    """
    hub = get_hub()
    hub.mark_as_reopened(fd)


class IOClosed(IOError):
    pass
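A minimal sketch of hub selection per the use_hub() docstring above; it must run once at startup, before any greenthreads or timers exist. The dotted/colon form is shown with a hypothetical package name:

    import eventlet.hubs

    eventlet.hubs.use_hub('poll')        # short name -> eventlet.hubs.poll
    hub = eventlet.hubs.get_hub()        # lazily built thread-local singleton
    print(type(hub).__module__)          # eventlet.hubs.poll

    # Custom hub class via the 'module:Class' convention (hypothetical module):
    # eventlet.hubs.use_hub('mypackage.myhub:MyHub')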
168
venv/lib/python3.12/site-packages/eventlet/hubs/asyncio.py
Normal file
@@ -0,0 +1,168 @@
"""
Asyncio-based hub, originally implemented by Miguel Grinberg.
"""

import asyncio
try:
    import concurrent.futures.thread
    concurrent_imported = True
except RuntimeError:
    # This happens in weird edge cases where asyncio hub is started at
    # shutdown.  Not much we can do if this happens.
    concurrent_imported = False
import os
import sys

from eventlet.hubs import hub
from eventlet.patcher import original


def is_available():
    """
    Indicate whether this hub is available, since some hubs are
    platform-specific.

    Python always has asyncio, so this is always ``True``.
    """
    return True


class Hub(hub.BaseHub):
    """An Eventlet hub implementation on top of an asyncio event loop."""

    def __init__(self):
        super().__init__()
        # Make sure asyncio thread pools use real threads:
        if concurrent_imported:
            concurrent.futures.thread.threading = original("threading")
            concurrent.futures.thread.queue = original("queue")

        # Make sure select/poll/epoll/kqueue are usable by asyncio:
        import selectors
        selectors.select = original("select")

        # Make sure DNS lookups use normal blocking API (which asyncio will run
        # in a thread):
        import asyncio.base_events
        asyncio.base_events.socket = original("socket")

        # The presumption is that eventlet is driving the event loop, so we
        # want a new one we control.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.sleep_event = asyncio.Event()

    def add_timer(self, timer):
        """
        Register a ``Timer``.

        Typically not called directly by users.
        """
        super().add_timer(timer)
        self.sleep_event.set()

    def _file_cb(self, cb, fileno):
        """
        Callback called by ``asyncio`` when a file descriptor has an event.
        """
        try:
            cb(fileno)
        except self.SYSTEM_EXCEPTIONS:
            raise
        except:
            self.squelch_exception(fileno, sys.exc_info())
        self.sleep_event.set()

    def add(self, evtype, fileno, cb, tb, mark_as_closed):
        """
        Add a file descriptor of given event type to the ``Hub``.  See the
        superclass for details.

        Typically not called directly by users.
        """
        try:
            os.fstat(fileno)
        except OSError:
            raise ValueError('Invalid file descriptor')
        already_listening = self.listeners[evtype].get(fileno) is not None
        listener = super().add(evtype, fileno, cb, tb, mark_as_closed)
        if not already_listening:
            if evtype == hub.READ:
                self.loop.add_reader(fileno, self._file_cb, cb, fileno)
            else:
                self.loop.add_writer(fileno, self._file_cb, cb, fileno)
        return listener

    def remove(self, listener):
        """
        Remove a listener from the ``Hub``.  See the superclass for details.

        Typically not called directly by users.
        """
        super().remove(listener)
        evtype = listener.evtype
        fileno = listener.fileno
        if not self.listeners[evtype].get(fileno):
            if evtype == hub.READ:
                self.loop.remove_reader(fileno)
            else:
                self.loop.remove_writer(fileno)

    def remove_descriptor(self, fileno):
        """
        Remove a file descriptor from the ``asyncio`` loop.

        Typically not called directly by users.
        """
        have_read = self.listeners[hub.READ].get(fileno)
        have_write = self.listeners[hub.WRITE].get(fileno)
        super().remove_descriptor(fileno)
        if have_read:
            self.loop.remove_reader(fileno)
        if have_write:
            self.loop.remove_writer(fileno)

    def run(self, *a, **kw):
        """
        Start the ``Hub`` running.  See the superclass for details.
        """
        async def async_run():
            if self.running:
                raise RuntimeError("Already running!")
            try:
                self.running = True
                self.stopping = False
                while not self.stopping:
                    while self.closed:
                        # We ditch all of these first.
                        self.close_one()
                    self.prepare_timers()
                    if self.debug_blocking:
                        self.block_detect_pre()
                    self.fire_timers(self.clock())
                    if self.debug_blocking:
                        self.block_detect_post()
                    self.prepare_timers()
                    wakeup_when = self.sleep_until()
                    if wakeup_when is None:
                        sleep_time = self.default_sleep()
                    else:
                        sleep_time = wakeup_when - self.clock()
                    if sleep_time > 0:
                        try:
                            await asyncio.wait_for(self.sleep_event.wait(),
                                                   sleep_time)
                        except asyncio.TimeoutError:
                            pass
                        self.sleep_event.clear()
                    else:
                        await asyncio.sleep(0)
                else:
                    self.timers_canceled = 0
                    del self.timers[:]
                    del self.next_timers[:]
            finally:
                self.running = False
                self.stopping = False

        self.loop.run_until_complete(async_run())
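The GreenThread.__await__ bridge shown in greenthread.py above only works when this hub owns the loop. A minimal sketch, assuming a recent eventlet that ships an eventlet.asyncio module with a spawn_for_awaitable() helper (treat that helper as an assumption if your version differs):

    import os
    os.environ.setdefault('EVENTLET_HUB', 'asyncio')   # must be set before the hub is created

    import eventlet
    import eventlet.asyncio   # assumed helper module in recent eventlet releases

    def green_work():
        eventlet.sleep(0.01)
        return 'result from a greenthread'

    async def main():
        # GreenThread.__await__ (see greenthread.py above) makes this awaitable:
        return await eventlet.spawn(green_work)

    # Drive the coroutine from the green side; spawn_for_awaitable wraps an
    # awaitable in a GreenThread (assumed API).
    print(eventlet.asyncio.spawn_for_awaitable(main()).wait())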
31
venv/lib/python3.12/site-packages/eventlet/hubs/epolls.py
Normal file
@@ -0,0 +1,31 @@
import errno
from eventlet import patcher, support
from eventlet.hubs import hub, poll
select = patcher.original('select')


def is_available():
    return hasattr(select, 'epoll')


# NOTE: we rely on the fact that the epoll flag constants
# are identical in value to the poll constants
class Hub(poll.Hub):
    def __init__(self, clock=None):
        super().__init__(clock=clock)
        self.poll = select.epoll()

    def add(self, evtype, fileno, cb, tb, mac):
        oldlisteners = bool(self.listeners[self.READ].get(fileno) or
                            self.listeners[self.WRITE].get(fileno))
        # not super() to avoid double register()
        listener = hub.BaseHub.add(self, evtype, fileno, cb, tb, mac)
        try:
            self.register(fileno, new=not oldlisteners)
        except OSError as ex:  # ignore EEXIST, #80
            if support.get_errno(ex) != errno.EEXIST:
                raise
        return listener

    def do_poll(self, seconds):
        return self.poll.poll(seconds)
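Each hub module exposes is_available() as shown above, and get_default_hub() simply takes the first module that reports True. A quick check from a REPL (importing these modules is safe on any platform; only the availability result differs):

    from eventlet.hubs import epolls, kqueue, poll, selects

    for mod in (epolls, kqueue, poll, selects):
        print(mod.__name__, mod.is_available())
    # epoll reports True only on Linux, kqueue only on BSD/macOS;
    # poll and select are available nearly everywhere.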
495
venv/lib/python3.12/site-packages/eventlet/hubs/hub.py
Normal file
@@ -0,0 +1,495 @@
import errno
import heapq
import math
import signal
import sys
import traceback

arm_alarm = None
if hasattr(signal, 'setitimer'):
    def alarm_itimer(seconds):
        signal.setitimer(signal.ITIMER_REAL, seconds)
    arm_alarm = alarm_itimer
else:
    try:
        import itimer
        arm_alarm = itimer.alarm
    except ImportError:
        def alarm_signal(seconds):
            signal.alarm(math.ceil(seconds))
        arm_alarm = alarm_signal

import eventlet.hubs
from eventlet.hubs import timer
from eventlet.support import greenlets as greenlet
try:
    from monotonic import monotonic
except ImportError:
    from time import monotonic

g_prevent_multiple_readers = True

READ = "read"
WRITE = "write"


def closed_callback(fileno):
    """ Used to de-fang a callback that may be triggered by a loop in BaseHub.wait
    """
    # No-op.
    pass


class FdListener:

    def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
        """ The following are required:
        cb - the standard callback, which will switch into the
            listening greenlet to indicate that the event waited upon
            is ready
        tb - a 'throwback'.  This is typically greenlet.throw, used
            to raise a signal into the target greenlet indicating that
            an event was obsoleted by its underlying filehandle being
            repurposed.
        mark_as_closed - if any listener is obsoleted, this is called
            (in the context of some other client greenlet) to alert
            underlying filehandle-wrapping objects that they've been
            closed.
        """
        assert (evtype is READ or evtype is WRITE)
        self.evtype = evtype
        self.fileno = fileno
        self.cb = cb
        self.tb = tb
        self.mark_as_closed = mark_as_closed
        self.spent = False
        self.greenlet = greenlet.getcurrent()

    def __repr__(self):
        return "%s(%r, %r, %r, %r)" % (type(self).__name__, self.evtype, self.fileno,
                                       self.cb, self.tb)
    __str__ = __repr__

    def defang(self):
        self.cb = closed_callback
        if self.mark_as_closed is not None:
            self.mark_as_closed()
        self.spent = True


noop = FdListener(READ, 0, lambda x: None, lambda x: None, None)


# in debug mode, track the call site that created the listener


class DebugListener(FdListener):

    def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
        self.where_called = traceback.format_stack()
        self.greenlet = greenlet.getcurrent()
        super().__init__(evtype, fileno, cb, tb, mark_as_closed)

    def __repr__(self):
        return "DebugListener(%r, %r, %r, %r, %r, %r)\n%sEndDebugFdListener" % (
            self.evtype,
            self.fileno,
            self.cb,
            self.tb,
            self.mark_as_closed,
            self.greenlet,
            ''.join(self.where_called))
    __str__ = __repr__


def alarm_handler(signum, frame):
    import inspect
    raise RuntimeError("Blocking detector ALARMED at " + str(inspect.getframeinfo(frame)))


class BaseHub:
    """ Base hub class for easing the implementation of subclasses that are
    specific to a particular underlying event architecture. """

    SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)

    READ = READ
    WRITE = WRITE

    def __init__(self, clock=None):
        self.listeners = {READ: {}, WRITE: {}}
        self.secondaries = {READ: {}, WRITE: {}}
        self.closed = []

        if clock is None:
            clock = monotonic
        self.clock = clock

        self.greenlet = greenlet.greenlet(self.run)
        self.stopping = False
        self.running = False
        self.timers = []
        self.next_timers = []
        self.lclass = FdListener
        self.timers_canceled = 0
        self.debug_exceptions = True
        self.debug_blocking = False
        self.debug_blocking_resolution = 1

    def block_detect_pre(self):
        # shortest alarm we can possibly raise is one second
        tmp = signal.signal(signal.SIGALRM, alarm_handler)
        if tmp != alarm_handler:
            self._old_signal_handler = tmp

        arm_alarm(self.debug_blocking_resolution)

    def block_detect_post(self):
        if (hasattr(self, "_old_signal_handler") and
                self._old_signal_handler):
            signal.signal(signal.SIGALRM, self._old_signal_handler)
        signal.alarm(0)

    def add(self, evtype, fileno, cb, tb, mark_as_closed):
        """ Signals an intent to read or write a particular file descriptor.

        The *evtype* argument is either the constant READ or WRITE.

        The *fileno* argument is the file number of the file of interest.

        The *cb* argument is the callback which will be called when the file
        is ready for reading/writing.

        The *tb* argument is the throwback used to signal (into the greenlet)
        that the file was closed.

        The *mark_as_closed* is used in the context of the event hub to
        prepare a Python object as being closed, pre-empting further
        close operations from accidentally shutting down the wrong OS thread.
        """
        listener = self.lclass(evtype, fileno, cb, tb, mark_as_closed)
        bucket = self.listeners[evtype]
        if fileno in bucket:
            if g_prevent_multiple_readers:
                raise RuntimeError(
                    "Second simultaneous %s on fileno %s "
                    "detected.  Unless you really know what you're doing, "
                    "make sure that only one greenthread can %s any "
                    "particular socket.  Consider using a pools.Pool. "
                    "If you do know what you're doing and want to disable "
                    "this error, call "
                    "eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; "
                    "THAT THREAD=%s" % (
                        evtype, fileno, evtype, cb, bucket[fileno]))
            # store off the second listener in another structure
            self.secondaries[evtype].setdefault(fileno, []).append(listener)
        else:
            bucket[fileno] = listener
        return listener

    def _obsolete(self, fileno):
        """ We've received an indication that 'fileno' has been obsoleted.
        Any current listeners must be defanged, and notifications to
        their greenlets queued up to send.
        """
        found = False
        for evtype, bucket in self.secondaries.items():
            if fileno in bucket:
                for listener in bucket[fileno]:
                    found = True
                    self.closed.append(listener)
                    listener.defang()
                del bucket[fileno]

        # For the primary listeners, we actually need to call remove,
        # which may modify the underlying OS polling objects.
        for evtype, bucket in self.listeners.items():
            if fileno in bucket:
                listener = bucket[fileno]
                found = True
                self.closed.append(listener)
                self.remove(listener)
                listener.defang()

        return found

    def notify_close(self, fileno):
        """ We might want to do something when a fileno is closed.
        However, currently it suffices to obsolete listeners only
        when we detect an old fileno being recycled, on open.
        """
        pass

    def remove(self, listener):
        if listener.spent:
            # trampoline may trigger this in its finally section.
            return

        fileno = listener.fileno
        evtype = listener.evtype
        if listener is self.listeners[evtype][fileno]:
            del self.listeners[evtype][fileno]
            # migrate a secondary listener to be the primary listener
            if fileno in self.secondaries[evtype]:
                sec = self.secondaries[evtype][fileno]
                if sec:
                    self.listeners[evtype][fileno] = sec.pop(0)
                if not sec:
                    del self.secondaries[evtype][fileno]
        else:
            self.secondaries[evtype][fileno].remove(listener)
            if not self.secondaries[evtype][fileno]:
                del self.secondaries[evtype][fileno]

    def mark_as_reopened(self, fileno):
        """ If a file descriptor is returned by the OS as the result of some
        open call (or equivalent), that signals that it might be being
        recycled.

        Catch the case where the fd was previously in use.
        """
        self._obsolete(fileno)

    def remove_descriptor(self, fileno):
        """ Completely remove all listeners for this fileno.  For internal use
        only."""
        # gather any listeners we have
        listeners = []
        listeners.append(self.listeners[READ].get(fileno, noop))
        listeners.append(self.listeners[WRITE].get(fileno, noop))
        listeners.extend(self.secondaries[READ].get(fileno, ()))
        listeners.extend(self.secondaries[WRITE].get(fileno, ()))
        for listener in listeners:
            try:
                # listener.cb may want to remove(listener)
                listener.cb(fileno)
            except Exception:
                self.squelch_generic_exception(sys.exc_info())
        # now this fileno is dead to all listeners
        self.listeners[READ].pop(fileno, None)
        self.listeners[WRITE].pop(fileno, None)
        self.secondaries[READ].pop(fileno, None)
        self.secondaries[WRITE].pop(fileno, None)

    def close_one(self):
        """ Triggered from the main run loop.  If a listener's underlying FD was
        closed somehow, throw an exception back to the trampoline, which should
        be able to manage it appropriately.
        """
        listener = self.closed.pop()
        if not listener.greenlet.dead:
            # There's no point signalling a greenlet that's already dead.
            listener.tb(eventlet.hubs.IOClosed(errno.ENOTCONN, "Operation on closed file"))

    def ensure_greenlet(self):
        if self.greenlet.dead:
            # create new greenlet sharing same parent as original
            new = greenlet.greenlet(self.run, self.greenlet.parent)
            # need to assign as parent of old greenlet
            # for those greenlets that are currently
            # children of the dead hub and may subsequently
            # exit without further switching to hub.
            self.greenlet.parent = new
            self.greenlet = new

    def switch(self):
        cur = greenlet.getcurrent()
        assert cur is not self.greenlet, 'Cannot switch to MAINLOOP from MAINLOOP'
        switch_out = getattr(cur, 'switch_out', None)
        if switch_out is not None:
            try:
                switch_out()
            except:
                self.squelch_generic_exception(sys.exc_info())
        self.ensure_greenlet()
        try:
            if self.greenlet.parent is not cur:
                cur.parent = self.greenlet
        except ValueError:
            pass  # gets raised if there is a greenlet parent cycle
        return self.greenlet.switch()

    def squelch_exception(self, fileno, exc_info):
        traceback.print_exception(*exc_info)
        sys.stderr.write("Removing descriptor: %r\n" % (fileno,))
        sys.stderr.flush()
        try:
            self.remove_descriptor(fileno)
        except Exception as e:
            sys.stderr.write("Exception while removing descriptor! %r\n" % (e,))
            sys.stderr.flush()

    def wait(self, seconds=None):
        raise NotImplementedError("Implement this in a subclass")

    def default_sleep(self):
        return 60.0

    def sleep_until(self):
        t = self.timers
        if not t:
            return None
        return t[0][0]

    def run(self, *a, **kw):
        """Run the runloop until abort is called.
        """
        # accept and discard variable arguments because they will be
        # supplied if other greenlets have run and exited before the
        # hub's greenlet gets a chance to run
        if self.running:
            raise RuntimeError("Already running!")
        try:
            self.running = True
            self.stopping = False
            while not self.stopping:
                while self.closed:
                    # We ditch all of these first.
                    self.close_one()
                self.prepare_timers()
                if self.debug_blocking:
                    self.block_detect_pre()
                self.fire_timers(self.clock())
                if self.debug_blocking:
                    self.block_detect_post()
                self.prepare_timers()
                wakeup_when = self.sleep_until()
                if wakeup_when is None:
                    sleep_time = self.default_sleep()
                else:
                    sleep_time = wakeup_when - self.clock()
                if sleep_time > 0:
                    self.wait(sleep_time)
                else:
                    self.wait(0)
            else:
                self.timers_canceled = 0
                del self.timers[:]
                del self.next_timers[:]
        finally:
            self.running = False
            self.stopping = False

    def abort(self, wait=False):
        """Stop the runloop.  If run is executing, it will exit after
        completing the next runloop iteration.

        Set *wait* to True to cause abort to switch to the hub immediately and
        wait until it's finished processing.  Waiting for the hub will only
        work from the main greenthread; all other greenthreads will become
        unreachable.
        """
        if self.running:
            self.stopping = True
        if wait:
            assert self.greenlet is not greenlet.getcurrent(
            ), "Can't abort with wait from inside the hub's greenlet."
            # schedule an immediate timer just so the hub doesn't sleep
            self.schedule_call_global(0, lambda: None)
            # switch to it; when done the hub will switch back to its parent,
            # the main greenlet
            self.switch()

    def squelch_generic_exception(self, exc_info):
        if self.debug_exceptions:
            traceback.print_exception(*exc_info)
            sys.stderr.flush()

    def squelch_timer_exception(self, timer, exc_info):
        if self.debug_exceptions:
            traceback.print_exception(*exc_info)
            sys.stderr.flush()

    def add_timer(self, timer):
        scheduled_time = self.clock() + timer.seconds
        self.next_timers.append((scheduled_time, timer))
        return scheduled_time

    def timer_canceled(self, timer):
        self.timers_canceled += 1
        len_timers = len(self.timers) + len(self.next_timers)
        if len_timers > 1000 and len_timers / 2 <= self.timers_canceled:
            self.timers_canceled = 0
            self.timers = [t for t in self.timers if not t[1].called]
            self.next_timers = [t for t in self.next_timers if not t[1].called]
            heapq.heapify(self.timers)

    def prepare_timers(self):
        heappush = heapq.heappush
        t = self.timers
        for item in self.next_timers:
            if item[1].called:
                self.timers_canceled -= 1
            else:
                heappush(t, item)
        del self.next_timers[:]

    def schedule_call_local(self, seconds, cb, *args, **kw):
        """Schedule a callable to be called after 'seconds' seconds have
        elapsed.  Cancel the timer if the greenlet has exited.
            seconds: The number of seconds to wait.
            cb: The callable to call after the given time.
            *args: Arguments to pass to the callable when called.
            **kw: Keyword arguments to pass to the callable when called.
        """
        t = timer.LocalTimer(seconds, cb, *args, **kw)
        self.add_timer(t)
        return t

    def schedule_call_global(self, seconds, cb, *args, **kw):
        """Schedule a callable to be called after 'seconds' seconds have
        elapsed.  The timer will NOT be canceled if the current greenlet has
        exited before the timer fires.
            seconds: The number of seconds to wait.
            cb: The callable to call after the given time.
            *args: Arguments to pass to the callable when called.
            **kw: Keyword arguments to pass to the callable when called.
        """
        t = timer.Timer(seconds, cb, *args, **kw)
        self.add_timer(t)
        return t

    def fire_timers(self, when):
        t = self.timers
        heappop = heapq.heappop

        while t:
            next = t[0]

            exp = next[0]
            timer = next[1]

            if when < exp:
                break

            heappop(t)

            try:
                if timer.called:
                    self.timers_canceled -= 1
                else:
                    timer()
            except self.SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_timer_exception(timer, sys.exc_info())

    # for debugging:

    def get_readers(self):
        return self.listeners[READ].values()

    def get_writers(self):
        return self.listeners[WRITE].values()

    def get_timers_count(hub):
        return len(hub.timers) + len(hub.next_timers)

    def set_debug_listeners(self, value):
        if value:
            self.lclass = DebugListener
        else:
            self.lclass = FdListener

    def set_timer_exceptions(self, value):
        self.debug_exceptions = value
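A minimal sketch of the timer machinery above: schedule_call_global() survives its calling greenthread, and canceling a timer that has already fired is a no-op (Timer.cancel checks its called flag, and pending cancellations feed the timer_canceled() bookkeeping):

    import eventlet
    from eventlet import hubs

    hub = hubs.get_hub()
    t = hub.schedule_call_global(0.05, print, 'global timer fired')
    eventlet.sleep(0.1)   # yield to the hub; the timer fires during this sleep
    t.cancel()            # harmless now that it has fired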
110
venv/lib/python3.12/site-packages/eventlet/hubs/kqueue.py
Normal file
@@ -0,0 +1,110 @@
import os
import sys

from eventlet import patcher, support
from eventlet.hubs import hub

select = patcher.original('select')
time = patcher.original('time')


def is_available():
    return hasattr(select, 'kqueue')


class Hub(hub.BaseHub):
    MAX_EVENTS = 100

    def __init__(self, clock=None):
        self.FILTERS = {
            hub.READ: select.KQ_FILTER_READ,
            hub.WRITE: select.KQ_FILTER_WRITE,
        }
        super().__init__(clock)
        self._events = {}
        self._init_kqueue()

    def _init_kqueue(self):
        self.kqueue = select.kqueue()
        self._pid = os.getpid()

    def _reinit_kqueue(self):
        self.kqueue.close()
        self._init_kqueue()
        events = [e for i in self._events.values()
                  for e in i.values()]
        self.kqueue.control(events, 0, 0)

    def _control(self, events, max_events, timeout):
        try:
            return self.kqueue.control(events, max_events, timeout)
        except OSError:
            # have we forked?
            if os.getpid() != self._pid:
                self._reinit_kqueue()
                return self.kqueue.control(events, max_events, timeout)
            raise

    def add(self, evtype, fileno, cb, tb, mac):
        listener = super().add(evtype, fileno, cb, tb, mac)
        events = self._events.setdefault(fileno, {})
        if evtype not in events:
            try:
                event = select.kevent(fileno, self.FILTERS.get(evtype), select.KQ_EV_ADD)
                self._control([event], 0, 0)
                events[evtype] = event
            except ValueError:
                super().remove(listener)
                raise
        return listener

    def _delete_events(self, events):
        del_events = [
            select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
            for e in events
        ]
        self._control(del_events, 0, 0)

    def remove(self, listener):
        super().remove(listener)
        evtype = listener.evtype
        fileno = listener.fileno
        if not self.listeners[evtype].get(fileno):
            event = self._events[fileno].pop(evtype, None)
            if event is None:
                return
            try:
                self._delete_events((event,))
            except OSError:
                pass

    def remove_descriptor(self, fileno):
        super().remove_descriptor(fileno)
        try:
            events = self._events.pop(fileno).values()
            self._delete_events(events)
        except KeyError:
            pass
        except OSError:
            pass

    def wait(self, seconds=None):
        readers = self.listeners[self.READ]
        writers = self.listeners[self.WRITE]

        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        result = self._control([], self.MAX_EVENTS, seconds)
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
        for event in result:
            fileno = event.ident
            evfilt = event.filter
            try:
                if evfilt == select.KQ_FILTER_READ:
                    readers.get(fileno, hub.noop).cb(fileno)
                if evfilt == select.KQ_FILTER_WRITE:
                    writers.get(fileno, hub.noop).cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())
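# --- Illustrative sketch (not part of the eventlet source; added for review) ---
# The _control/_reinit_kqueue pair implements a common pattern: a kqueue
# descriptor is not usable across fork(), so the hub records the creating
# pid and rebuilds its kqueue when it notices a pid change. A minimal
# standalone sketch of the same idea, assuming a kqueue-capable platform
# (macOS/BSD); ForkAwareKqueue is a hypothetical name for illustration:
#
#   import os, select
#
#   class ForkAwareKqueue:
#       def __init__(self):
#           self._kq = select.kqueue()
#           self._pid = os.getpid()
#
#       def control(self, changes, max_events, timeout=None):
#           if os.getpid() != self._pid:
#               # child process: the inherited kqueue fd is unusable
#               self._kq.close()
#               self.__init__()
#           return self._kq.control(changes, max_events, timeout)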
118
venv/lib/python3.12/site-packages/eventlet/hubs/poll.py
Normal file
@ -0,0 +1,118 @@
import errno
import sys

from eventlet import patcher, support
from eventlet.hubs import hub

select = patcher.original('select')
time = patcher.original('time')


def is_available():
    return hasattr(select, 'poll')


class Hub(hub.BaseHub):
    def __init__(self, clock=None):
        super().__init__(clock)
        self.EXC_MASK = select.POLLERR | select.POLLHUP
        self.READ_MASK = select.POLLIN | select.POLLPRI
        self.WRITE_MASK = select.POLLOUT
        self.poll = select.poll()

    def add(self, evtype, fileno, cb, tb, mac):
        listener = super().add(evtype, fileno, cb, tb, mac)
        self.register(fileno, new=True)
        return listener

    def remove(self, listener):
        super().remove(listener)
        self.register(listener.fileno)

    def register(self, fileno, new=False):
        mask = 0
        if self.listeners[self.READ].get(fileno):
            mask |= self.READ_MASK | self.EXC_MASK
        if self.listeners[self.WRITE].get(fileno):
            mask |= self.WRITE_MASK | self.EXC_MASK
        try:
            if mask:
                if new:
                    self.poll.register(fileno, mask)
                else:
                    try:
                        self.poll.modify(fileno, mask)
                    except OSError:
                        self.poll.register(fileno, mask)
            else:
                try:
                    self.poll.unregister(fileno)
                except (KeyError, OSError):
                    # raised if we try to remove a fileno that was
                    # already removed/invalid
                    pass
        except ValueError:
            # fileno is bad, issue 74
            self.remove_descriptor(fileno)
            raise

    def remove_descriptor(self, fileno):
        super().remove_descriptor(fileno)
        try:
            self.poll.unregister(fileno)
        except (KeyError, ValueError, OSError):
            # raised if we try to remove a fileno that was
            # already removed/invalid
            pass

    def do_poll(self, seconds):
        # poll.poll expects integral milliseconds
        return self.poll.poll(int(seconds * 1000.0))

    def wait(self, seconds=None):
        readers = self.listeners[self.READ]
        writers = self.listeners[self.WRITE]

        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        try:
            presult = self.do_poll(seconds)
        except OSError as e:
            if support.get_errno(e) == errno.EINTR:
                return
            raise
        SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS

        if self.debug_blocking:
            self.block_detect_pre()

        # Accumulate the listeners to call back to prior to
        # triggering any of them. This is to keep the set
        # of callbacks in sync with the events we've just
        # polled for. It prevents one handler from invalidating
        # another.
        callbacks = set()
        noop = hub.noop  # shave getattr
        for fileno, event in presult:
            if event & self.READ_MASK:
                callbacks.add((readers.get(fileno, noop), fileno))
            if event & self.WRITE_MASK:
                callbacks.add((writers.get(fileno, noop), fileno))
            if event & select.POLLNVAL:
                self.remove_descriptor(fileno)
                continue
            if event & self.EXC_MASK:
                callbacks.add((readers.get(fileno, noop), fileno))
                callbacks.add((writers.get(fileno, noop), fileno))

        for listener, fileno in callbacks:
            try:
                listener.cb(fileno)
            except SYSTEM_EXCEPTIONS:
                raise
            except:
                self.squelch_exception(fileno, sys.exc_info())

        if self.debug_blocking:
            self.block_detect_post()
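# --- Illustrative sketch (not part of the eventlet source; added for review) ---
# wait() collects (listener, fileno) pairs into a set *before* invoking any
# callback, so one callback removing another listener cannot cause a stale
# or double dispatch within the same poll round. The shape of the pattern,
# with readers/writers/poller standing in for the hub's own attributes:
#
#   ready = poller.poll(timeout_ms)
#   callbacks = set()
#   for fd, event in ready:          # phase 1: snapshot
#       if event & READ_MASK:
#           callbacks.add((readers.get(fd, noop), fd))
#       if event & WRITE_MASK:
#           callbacks.add((writers.get(fd, noop), fd))
#   for listener, fd in callbacks:   # phase 2: dispatch
#       listener.cb(fd)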
@ -0,0 +1,4 @@
raise ImportError(
    "Eventlet pyevent hub was removed because it was not maintained."
    " Try version 0.22.1 or older. Sorry for the inconvenience."
)
63
venv/lib/python3.12/site-packages/eventlet/hubs/selects.py
Normal file
@ -0,0 +1,63 @@
import errno
import sys

from eventlet import patcher, support
from eventlet.hubs import hub

select = patcher.original('select')
time = patcher.original('time')

try:
    BAD_SOCK = {errno.EBADF, errno.WSAENOTSOCK}
except AttributeError:
    BAD_SOCK = {errno.EBADF}


def is_available():
    return hasattr(select, 'select')


class Hub(hub.BaseHub):
    def _remove_bad_fds(self):
        """ Iterate through fds, removing the ones that are bad per the
        operating system.
        """
        all_fds = list(self.listeners[self.READ]) + list(self.listeners[self.WRITE])
        for fd in all_fds:
            try:
                select.select([fd], [], [], 0)
            except OSError as e:
                if support.get_errno(e) in BAD_SOCK:
                    self.remove_descriptor(fd)

    def wait(self, seconds=None):
        readers = self.listeners[self.READ]
        writers = self.listeners[self.WRITE]
        if not readers and not writers:
            if seconds:
                time.sleep(seconds)
            return
        reader_fds = list(readers)
        writer_fds = list(writers)
        all_fds = reader_fds + writer_fds
        try:
            r, w, er = select.select(reader_fds, writer_fds, all_fds, seconds)
        except OSError as e:
            if support.get_errno(e) == errno.EINTR:
                return
            elif support.get_errno(e) in BAD_SOCK:
                self._remove_bad_fds()
                return
            else:
                raise

        for fileno in er:
            readers.get(fileno, hub.noop).cb(fileno)
            writers.get(fileno, hub.noop).cb(fileno)

        for listeners, events in ((readers, r), (writers, w)):
            for fileno in events:
                try:
                    listeners.get(fileno, hub.noop).cb(fileno)
                except self.SYSTEM_EXCEPTIONS:
                    raise
                except:
                    self.squelch_exception(fileno, sys.exc_info())
106
venv/lib/python3.12/site-packages/eventlet/hubs/timer.py
Normal file
@ -0,0 +1,106 @@
import traceback

import eventlet.hubs
from eventlet.support import greenlets as greenlet
import io

""" If true, captures a stack trace for each timer when constructed.  This is
useful for debugging leaking timers, to find out where the timer was set up. """
_g_debug = False


class Timer:
    def __init__(self, seconds, cb, *args, **kw):
        """Create a timer.
            seconds: The minimum number of seconds to wait before calling
            cb: The callback to call when the timer has expired
            *args: The arguments to pass to cb
            **kw: The keyword arguments to pass to cb

        This timer will not be run unless it is scheduled in a runloop by
        calling timer.schedule() or runloop.add_timer(timer).
        """
        self.seconds = seconds
        self.tpl = cb, args, kw
        self.called = False
        if _g_debug:
            self.traceback = io.StringIO()
            traceback.print_stack(file=self.traceback)

    @property
    def pending(self):
        return not self.called

    def __repr__(self):
        secs = getattr(self, 'seconds', None)
        cb, args, kw = getattr(self, 'tpl', (None, None, None))
        retval = "Timer(%s, %s, *%s, **%s)" % (
            secs, cb, args, kw)
        if _g_debug and hasattr(self, 'traceback'):
            retval += '\n' + self.traceback.getvalue()
        return retval

    def copy(self):
        cb, args, kw = self.tpl
        return self.__class__(self.seconds, cb, *args, **kw)

    def schedule(self):
        """Schedule this timer to run in the current runloop.
        """
        self.called = False
        self.scheduled_time = eventlet.hubs.get_hub().add_timer(self)
        return self

    def __call__(self, *args):
        if not self.called:
            self.called = True
            cb, args, kw = self.tpl
            try:
                cb(*args, **kw)
            finally:
                try:
                    del self.tpl
                except AttributeError:
                    pass

    def cancel(self):
        """Prevent this timer from being called. If the timer has already
        been called or canceled, this call has no effect.
        """
        if not self.called:
            self.called = True
            eventlet.hubs.get_hub().timer_canceled(self)
            try:
                del self.tpl
            except AttributeError:
                pass

    # No default ordering in 3.x. heapq uses <
    # FIXME should full set be added?
    def __lt__(self, other):
        return id(self) < id(other)


class LocalTimer(Timer):

    def __init__(self, *args, **kwargs):
        self.greenlet = greenlet.getcurrent()
        Timer.__init__(self, *args, **kwargs)

    @property
    def pending(self):
        if self.greenlet is None or self.greenlet.dead:
            return False
        return not self.called

    def __call__(self, *args):
        if not self.called:
            self.called = True
            if self.greenlet is not None and self.greenlet.dead:
                return
            cb, args, kw = self.tpl
            cb(*args, **kw)

    def cancel(self):
        self.greenlet = None
        Timer.cancel(self)
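# --- Illustrative sketch (not part of the eventlet source; added for review) ---
# Timers must be scheduled on a hub before they run; __lt__ is defined only
# so that heapq can order timers that share an expiry. A minimal usage
# sketch, assuming a running eventlet hub:
#
#   import eventlet
#   from eventlet.hubs import timer
#
#   t = timer.Timer(0.5, print, "timer fired")
#   t.schedule()                 # equivalent to get_hub().add_timer(t)
#   eventlet.sleep(1)            # let the hub run; prints "timer fired"
#   assert t.called and not t.pending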
37
venv/lib/python3.12/site-packages/eventlet/lock.py
Normal file
@ -0,0 +1,37 @@
from eventlet import hubs
from eventlet.semaphore import Semaphore


class Lock(Semaphore):

    """A lock.

    This is API-compatible with :class:`threading.Lock`.

    It is a context manager, and thus can be used in a with block::

      lock = Lock()
      with lock:
        do_some_stuff()
    """

    def release(self, blocking=True):
        """Modify behaviour vs :class:`Semaphore` to raise a RuntimeError
        exception if the value is greater than zero. This corrects behaviour
        to realign with :class:`threading.Lock`.
        """
        if self.counter > 0:
            raise RuntimeError("release unlocked lock")

        # Consciously *do not* call super().release(), but instead inline
        # Semaphore.release() here. We've seen issues with logging._lock
        # deadlocking because garbage collection happened to run mid-release
        # and eliminating the extra stack frame should help prevent that.
        # See https://github.com/eventlet/eventlet/issues/742
        self.counter += 1
        if self._waiters:
            hubs.get_hub().schedule_call_global(0, self._do_acquire)
        return True

    def _at_fork_reinit(self):
        self.counter = 1
        self._waiters.clear()
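# --- Illustrative sketch (not part of the eventlet source; added for review) ---
# Unlike Semaphore, Lock refuses to be released above its initial value,
# matching threading.Lock semantics:
#
#   from eventlet.lock import Lock
#
#   lock = Lock()
#   with lock:
#       pass            # acquired, then released on exit
#   try:
#       lock.release()  # releasing an already-unlocked lock
#   except RuntimeError as e:
#       print(e)        # "release unlocked lock"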
612
venv/lib/python3.12/site-packages/eventlet/patcher.py
Normal file
@ -0,0 +1,612 @@
from __future__ import annotations

try:
    import _imp as imp
except ImportError:
    import imp
import sys
try:
    # Only for this purpose, it's irrelevant if `os` was already patched.
    # https://github.com/eventlet/eventlet/pull/661
    from os import register_at_fork
except ImportError:
    register_at_fork = None

import eventlet


__all__ = ['inject', 'import_patched', 'monkey_patch', 'is_monkey_patched']

__exclude = {'__builtins__', '__file__', '__name__'}


class SysModulesSaver:
    """Class that captures some subset of the current state of
    sys.modules.  Pass in an iterator of module names to the
    constructor."""

    def __init__(self, module_names=()):
        self._saved = {}
        imp.acquire_lock()
        self.save(*module_names)

    def save(self, *module_names):
        """Saves the named modules to the object."""
        for modname in module_names:
            self._saved[modname] = sys.modules.get(modname, None)

    def restore(self):
        """Restores the modules that the saver knows about into
        sys.modules.
        """
        try:
            for modname, mod in self._saved.items():
                if mod is not None:
                    sys.modules[modname] = mod
                else:
                    try:
                        del sys.modules[modname]
                    except KeyError:
                        pass
        finally:
            imp.release_lock()


def inject(module_name, new_globals, *additional_modules):
    """Base method for "injecting" greened modules into an imported module.  It
    imports the module specified in *module_name*, arranging things so
    that the already-imported modules in *additional_modules* are used when
    *module_name* makes its imports.

    **Note:** This function does not create or change any sys.modules item, so
    if your greened module uses code like 'sys.modules["your_module_name"]', you
    need to update sys.modules yourself.

    *new_globals* is either None or a globals dictionary that gets populated
    with the contents of the *module_name* module.  This is useful when creating
    a "green" version of some other module.

    *additional_modules* should be a collection of two-element tuples, of the
    form (<name>, <module>).  If it's not specified, a default selection of
    name/module pairs is used, which should cover all use cases but may be
    slower because there are inevitably redundant or unnecessary imports.
    """
    patched_name = '__patched_module_' + module_name
    if patched_name in sys.modules:
        # returning already-patched module so as not to destroy existing
        # references to patched modules
        return sys.modules[patched_name]

    if not additional_modules:
        # supply some defaults
        additional_modules = (
            _green_os_modules() +
            _green_select_modules() +
            _green_socket_modules() +
            _green_thread_modules() +
            _green_time_modules())
        # _green_MySQLdb()) # enable this after a short baking-in period

    # after this we are gonna screw with sys.modules, so capture the
    # state of all the modules we're going to mess with, and lock
    saver = SysModulesSaver([name for name, m in additional_modules])
    saver.save(module_name)

    # Cover the target modules so that when you import the module it
    # sees only the patched versions
    for name, mod in additional_modules:
        sys.modules[name] = mod

    # Remove the old module from sys.modules and reimport it while
    # the specified modules are in place
    sys.modules.pop(module_name, None)
    # Also remove submodules and reimport.  Copy the keys to a list
    # because the pop operations change the contents of sys.modules
    # within the loop.
    for imported_module_name in list(sys.modules.keys()):
        if imported_module_name.startswith(module_name + '.'):
            sys.modules.pop(imported_module_name, None)
    try:
        module = __import__(module_name, {}, {}, module_name.split('.')[:-1])

        if new_globals is not None:
            # Update the given globals dictionary with everything from this new module
            for name in dir(module):
                if name not in __exclude:
                    new_globals[name] = getattr(module, name)

        # Keep a reference to the new module to prevent it from dying
        sys.modules[patched_name] = module
    finally:
        saver.restore()  # Put the original modules back

    return module


def import_patched(module_name, *additional_modules, **kw_additional_modules):
    """Imports a module in a way that ensures that the module uses "green"
    versions of the standard library modules, so that everything works
    nonblockingly.

    The only required argument is the name of the module to be imported.
    """
    return inject(
        module_name,
        None,
        *additional_modules + tuple(kw_additional_modules.items()))


def patch_function(func, *additional_modules):
    """Decorator that returns a version of the function that patches
    some modules for the duration of the function call.  This is
    deeply gross and should only be used for functions that import
    network libraries within their function bodies that there is no
    way of getting around."""
    if not additional_modules:
        # supply some defaults
        additional_modules = (
            _green_os_modules() +
            _green_select_modules() +
            _green_socket_modules() +
            _green_thread_modules() +
            _green_time_modules())

    def patched(*args, **kw):
        saver = SysModulesSaver()
        for name, mod in additional_modules:
            saver.save(name)
            sys.modules[name] = mod
        try:
            return func(*args, **kw)
        finally:
            saver.restore()
    return patched


def _original_patch_function(func, *module_names):
    """Kind of the contrapositive of patch_function: decorates a
    function such that when it's called, sys.modules is populated only
    with the unpatched versions of the specified modules.  Unlike
    patch_function, only the names of the modules need be supplied,
    and there are no defaults.  This is a gross hack; tell your kids not
    to import inside function bodies!"""
    def patched(*args, **kw):
        saver = SysModulesSaver(module_names)
        for name in module_names:
            sys.modules[name] = original(name)
        try:
            return func(*args, **kw)
        finally:
            saver.restore()
    return patched


def original(modname):
    """ This returns an unpatched version of a module; this is useful for
    Eventlet itself (i.e. tpool)."""
    # note that it's not necessary to temporarily install unpatched
    # versions of all patchable modules during the import of the
    # module; this is because none of them import each other, except
    # for threading which imports thread
    original_name = '__original_module_' + modname
    if original_name in sys.modules:
        return sys.modules.get(original_name)

    # re-import the "pure" module and store it in the global _originals
    # dict; be sure to restore whatever module had that name already
    saver = SysModulesSaver((modname,))
    sys.modules.pop(modname, None)
    # some rudimentary dependency checking -- fortunately the modules
    # we're working on don't have many dependencies so we can just do
    # some special-casing here
    deps = {'threading': '_thread', 'queue': 'threading'}
    if modname in deps:
        dependency = deps[modname]
        saver.save(dependency)
        sys.modules[dependency] = original(dependency)
    try:
        real_mod = __import__(modname, {}, {}, modname.split('.')[:-1])
        if modname in ('Queue', 'queue') and not hasattr(real_mod, '_threading'):
            # tricky hack: Queue's constructor in <2.7 imports
            # threading on every instantiation; therefore we wrap
            # it so that it always gets the original threading
            real_mod.Queue.__init__ = _original_patch_function(
                real_mod.Queue.__init__,
                'threading')
        # save a reference to the unpatched module so it doesn't get lost
        sys.modules[original_name] = real_mod
    finally:
        saver.restore()

    return sys.modules[original_name]
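# --- Illustrative sketch (not part of the eventlet source; added for review) ---
# original() is how eventlet itself gets at unpatched stdlib modules even
# after monkey_patch() has run. For example, a real OS thread can still be
# started from patched code:
#
#   import eventlet
#   eventlet.monkey_patch()
#
#   from eventlet import patcher
#   real_threading = patcher.original('threading')   # unpatched module
#   t = real_threading.Thread(target=print, args=("native thread",))
#   t.start()
#   t.join()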


already_patched = {}


def monkey_patch(**on):
    """Globally patches certain system modules to be greenthread-friendly.

    The keyword arguments afford some control over which modules are patched.
    If no keyword arguments are supplied, all possible modules are patched.
    If keywords are set to True, only the specified modules are patched.  E.g.,
    ``monkey_patch(socket=True, select=True)`` patches only the select and
    socket modules.  Most arguments patch the single module of the same name
    (os, time, select).  The exceptions are socket, which also patches the ssl
    module if present; and thread, which patches thread, threading, and Queue.

    It's safe to call monkey_patch multiple times.
    """

    # Workaround for import cycle observed as following in monotonic
    # RuntimeError: no suitable implementation for this system
    # see https://github.com/eventlet/eventlet/issues/401#issuecomment-325015989
    #
    # Make sure the hub is completely imported before any
    # monkey-patching, or we risk recursion if the process of importing
    # the hub calls into monkey-patched modules.
    eventlet.hubs.get_hub()

    accepted_args = {'os', 'select', 'socket',
                     'thread', 'time', 'psycopg', 'MySQLdb',
                     'builtins', 'subprocess'}
    # To make sure only one of them is passed here
    assert not ('__builtin__' in on and 'builtins' in on)
    try:
        b = on.pop('__builtin__')
    except KeyError:
        pass
    else:
        on['builtins'] = b

    default_on = on.pop("all", None)

    for k in on.keys():
        if k not in accepted_args:
            raise TypeError("monkey_patch() got an unexpected "
                            "keyword argument %r" % k)
    if default_on is None:
        default_on = True not in on.values()
    for modname in accepted_args:
        if modname == 'MySQLdb':
            # MySQLdb is only on when explicitly patched for the moment
            on.setdefault(modname, False)
        if modname == 'builtins':
            on.setdefault(modname, False)
        on.setdefault(modname, default_on)

    import threading
    original_rlock_type = type(threading.RLock())

    modules_to_patch = []
    for name, modules_function in [
        ('os', _green_os_modules),
        ('select', _green_select_modules),
        ('socket', _green_socket_modules),
        ('thread', _green_thread_modules),
        ('time', _green_time_modules),
        ('MySQLdb', _green_MySQLdb),
        ('builtins', _green_builtins),
        ('subprocess', _green_subprocess_modules),
    ]:
        if on[name] and not already_patched.get(name):
            modules_to_patch += modules_function()
            already_patched[name] = True

    if on['psycopg'] and not already_patched.get('psycopg'):
        try:
            from eventlet.support import psycopg2_patcher
            psycopg2_patcher.make_psycopg_green()
            already_patched['psycopg'] = True
        except ImportError:
            # note that if we get an importerror from trying to
            # monkeypatch psycopg, we will continually retry it
            # whenever monkey_patch is called; this should not be a
            # performance problem but it allows is_monkey_patched to
            # tell us whether or not we succeeded
            pass

    _threading = original('threading')
    imp.acquire_lock()
    try:
        for name, mod in modules_to_patch:
            orig_mod = sys.modules.get(name)
            if orig_mod is None:
                orig_mod = __import__(name)
            for attr_name in mod.__patched__:
                patched_attr = getattr(mod, attr_name, None)
                if patched_attr is not None:
                    setattr(orig_mod, attr_name, patched_attr)
            deleted = getattr(mod, '__deleted__', [])
            for attr_name in deleted:
                if hasattr(orig_mod, attr_name):
                    delattr(orig_mod, attr_name)

            # https://github.com/eventlet/eventlet/issues/592
            if name == 'threading' and register_at_fork:
                def fix_threading_active(
                    _global_dict=_threading.current_thread.__globals__,
                    # alias orig_mod as patched to reflect its new state
                    # https://github.com/eventlet/eventlet/pull/661#discussion_r509877481
                    _patched=orig_mod,
                ):
                    _prefork_active = [None]

                    def before_fork():
                        _prefork_active[0] = _global_dict['_active']
                        _global_dict['_active'] = _patched._active

                    def after_fork():
                        _global_dict['_active'] = _prefork_active[0]

                    register_at_fork(
                        before=before_fork,
                        after_in_parent=after_fork)
                fix_threading_active()
    finally:
        imp.release_lock()

    import importlib._bootstrap
    thread = original('_thread')
    # importlib must use real thread locks, not eventlet.Semaphore
    importlib._bootstrap._thread = thread

    # Issue #185: Since Python 3.3, threading.RLock is implemented in C and
    # so call a C function to get the thread identifier, instead of calling
    # threading.get_ident(). Force the Python implementation of RLock which
    # calls threading.get_ident() and so is compatible with eventlet.
    import threading
    threading.RLock = threading._PyRLock

    # Issue #508: Since Python 3.7 queue.SimpleQueue is implemented in C,
    # causing a deadlock.  Replace the C implementation with the Python one.
    import queue
    queue.SimpleQueue = queue._PySimpleQueue

    # Green existing locks _after_ patching modules, since patching modules
    # might involve imports that create new locks:
    _green_existing_locks(original_rlock_type)
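# --- Illustrative sketch (not part of the eventlet source; added for review) ---
# monkey_patch() should run before other modules are imported, otherwise
# already-created locks and cached imports keep their blocking behavior
# (which is exactly what _green_existing_locks then tries to repair after
# the fact). The canonical call pattern:
#
#   import eventlet
#   eventlet.monkey_patch()          # first lines of the main module
#
#   import socket                    # now resolves to the green socket
#   assert eventlet.patcher.is_monkey_patched('socket')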


def is_monkey_patched(module):
    """Returns True if the given module is monkeypatched currently, False if
    not.  *module* can be either the module itself or its name.

    Based entirely off the name of the module, so if you import a
    module some other way than with the import keyword (including
    import_patched), this might not be correct about that particular
    module."""
    return module in already_patched or \
        getattr(module, '__name__', None) in already_patched


def _green_existing_locks(rlock_type):
    """Make locks created before monkey-patching safe.

    RLocks rely on a Lock and on Python 2, if an unpatched Lock blocks, it
    blocks the native thread. We need to replace these with green Locks.

    This was originally noticed in the stdlib logging module."""
    import gc
    import os
    import eventlet.green.thread

    # We're monkey-patching so there can't be any greenlets yet, ergo our thread
    # ID is the only valid owner possible.
    tid = eventlet.green.thread.get_ident()

    # Now, upgrade all instances:
    def upgrade(old_lock):
        return _convert_py3_rlock(old_lock, tid)

    _upgrade_instances(sys.modules, rlock_type, upgrade)

    # Report if there are RLocks we couldn't upgrade.  For cases where we're
    # using coverage.py in the parent process, and for tests more generally,
    # this is difficult to ensure, so just don't complain in that case.
    if "PYTEST_CURRENT_TEST" in os.environ:
        return
    # On older Pythons (< 3.10), gc.get_objects() won't return any RLock
    # instances, so this warning won't get logged on older Pythons.  However,
    # it's a useful warning, so we try to do it anyway for the benefit of those
    # users on 3.10 or later.
    gc.collect()
    remaining_rlocks = len({o for o in gc.get_objects() if isinstance(o, rlock_type)})
    if remaining_rlocks:
        try:
            import _frozen_importlib
        except ImportError:
            pass
        else:
            for o in gc.get_objects():
                # This can happen in Python 3.12, at least, if monkey patching
                # happened as a side-effect of importing a module.
                if not isinstance(o, rlock_type):
                    continue
                if _frozen_importlib._ModuleLock in map(type, gc.get_referrers(o)):
                    remaining_rlocks -= 1
                del o

    if remaining_rlocks:
        import logging
        logger = logging.Logger("eventlet")
        logger.error("{} RLock(s) were not greened,".format(remaining_rlocks) +
                     " to fix this error make sure you run eventlet.monkey_patch() " +
                     "before importing any other modules.")


def _upgrade_instances(container, klass, upgrade, visited=None, old_to_new=None):
    """
    Starting with a Python object, find all instances of ``klass``, following
    references in ``dict`` values, ``list`` items, and attributes.

    Once an object is found, replace all instances with
    ``upgrade(found_object)``, again limited to the criteria above.

    In practice this is used only for ``threading.RLock``, so we can assume
    instances are hashable.
    """
    if visited is None:
        visited = {}  # map id(obj) to obj
    if old_to_new is None:
        old_to_new = {}  # map old klass instance to upgrade(old)

    # Handle circular references:
    visited[id(container)] = container

    def upgrade_or_traverse(obj):
        if id(obj) in visited:
            return None
        if isinstance(obj, klass):
            if obj in old_to_new:
                return old_to_new[obj]
            else:
                new = upgrade(obj)
                old_to_new[obj] = new
                return new
        else:
            _upgrade_instances(obj, klass, upgrade, visited, old_to_new)
            return None

    if isinstance(container, dict):
        for k, v in list(container.items()):
            new = upgrade_or_traverse(v)
            if new is not None:
                container[k] = new
    if isinstance(container, list):
        for i, v in enumerate(container):
            new = upgrade_or_traverse(v)
            if new is not None:
                container[i] = new
    try:
        container_vars = vars(container)
    except TypeError:
        pass
    else:
        # If we get here, we're operating on an object that could
        # be doing strange things. If anything bad happens, error and
        # warn the eventlet user to monkey_patch earlier.
        try:
            for k, v in list(container_vars.items()):
                new = upgrade_or_traverse(v)
                if new is not None:
                    setattr(container, k, new)
        except:
            import logging
            logger = logging.Logger("eventlet")
            logger.exception("An exception was thrown while monkey_patching for eventlet. "
                             "to fix this error make sure you run eventlet.monkey_patch() "
                             "before importing any other modules.", exc_info=True)


def _convert_py3_rlock(old, tid):
    """
    Convert a normal RLock to one implemented in Python.

    This is necessary to make RLocks work with eventlet, but also introduces
    bugs, e.g. https://bugs.python.org/issue13697.  So more of a downgrade,
    really.
    """
    import threading
    from eventlet.green.thread import allocate_lock
    new = threading._PyRLock()
    if not hasattr(new, "_block") or not hasattr(new, "_owner"):
        # These will only fail if Python changes its internal implementation of
        # _PyRLock:
        raise RuntimeError(
            "INTERNAL BUG. Perhaps you are using a major version " +
            "of Python that is unsupported by eventlet? Please file a bug " +
            "at https://github.com/eventlet/eventlet/issues/new")
    new._block = allocate_lock()
    acquired = False
    while old._is_owned():
        old.release()
        new.acquire()
        acquired = True
    if old._is_owned():
        new.acquire()
        acquired = True
    if acquired:
        new._owner = tid
    return new


def _green_os_modules():
    from eventlet.green import os
    return [('os', os)]


def _green_select_modules():
    from eventlet.green import select
    modules = [('select', select)]

    from eventlet.green import selectors
    modules.append(('selectors', selectors))

    return modules


def _green_socket_modules():
    from eventlet.green import socket
    try:
        from eventlet.green import ssl
        return [('socket', socket), ('ssl', ssl)]
    except ImportError:
        return [('socket', socket)]


def _green_subprocess_modules():
    from eventlet.green import subprocess
    return [('subprocess', subprocess)]


def _green_thread_modules():
    from eventlet.green import Queue
    from eventlet.green import thread
    from eventlet.green import threading
    return [('queue', Queue), ('_thread', thread), ('threading', threading)]


def _green_time_modules():
    from eventlet.green import time
    return [('time', time)]


def _green_MySQLdb():
    try:
        from eventlet.green import MySQLdb
        return [('MySQLdb', MySQLdb)]
    except ImportError:
        return []


def _green_builtins():
    try:
        from eventlet.green import builtin
        return [('builtins', builtin)]
    except ImportError:
        return []


def slurp_properties(source, destination, ignore=[], srckeys=None):
    """Copy properties from *source* (assumed to be a module) to
    *destination* (assumed to be a dict).

    *ignore* lists properties that should not be thusly copied.
    *srckeys* is a list of keys to copy, if the source's __all__ is
    untrustworthy.
    """
    if srckeys is None:
        srckeys = source.__all__
    destination.update({
        name: getattr(source, name)
        for name in srckeys
        if not (name.startswith('__') or name in ignore)
    })


if __name__ == "__main__":
    sys.argv.pop(0)
    monkey_patch()
    with open(sys.argv[0]) as f:
        code = compile(f.read(), sys.argv[0], 'exec')
        exec(code)
184
venv/lib/python3.12/site-packages/eventlet/pools.py
Normal file
@ -0,0 +1,184 @@
import collections
from contextlib import contextmanager

from eventlet import queue


__all__ = ['Pool', 'TokenPool']


class Pool:
    """
    Pool class implements resource limitation and construction.

    There are two ways of using Pool: passing a `create` argument or
    subclassing. In either case you must provide a way to create
    the resource.

    When using the `create` argument, pass a function with no arguments::

        http_pool = pools.Pool(create=httplib2.Http)

    If you need to pass arguments, build a nullary function with either a
    `lambda` expression::

        http_pool = pools.Pool(create=lambda: httplib2.Http(timeout=90))

    or :func:`functools.partial`::

        from functools import partial
        http_pool = pools.Pool(create=partial(httplib2.Http, timeout=90))

    When subclassing, define only the :meth:`create` method
    to implement the desired resource::

        class MyPool(pools.Pool):
            def create(self):
                return MyObject()

    The :meth:`item` method acts as a context manager;
    that's the best way to use it::

        with mypool.item() as thing:
            thing.dostuff()

    The maximum size of the pool can be modified at runtime via
    the :meth:`resize` method.

    Specifying a non-zero *min_size* argument pre-populates the pool with
    *min_size* items.  *max_size* sets a hard limit to the size of the pool --
    it cannot contain any more items than *max_size*, and if there are already
    *max_size* items 'checked out' of the pool, the pool will cause any
    greenthread calling :meth:`get` to cooperatively yield until an item
    is :meth:`put` in.
    """

    def __init__(self, min_size=0, max_size=4, order_as_stack=False, create=None):
        """*order_as_stack* governs the ordering of the items in the free pool.
        If ``False`` (the default), the free items collection (of items that
        were created and were put back in the pool) acts as a round-robin,
        giving each item approximately equal utilization.  If ``True``, the
        free pool acts as a FILO stack, which preferentially re-uses items that
        have most recently been used.
        """
        self.min_size = min_size
        self.max_size = max_size
        self.order_as_stack = order_as_stack
        self.current_size = 0
        self.channel = queue.LightQueue(0)
        self.free_items = collections.deque()
        if create is not None:
            self.create = create

        for x in range(min_size):
            self.current_size += 1
            self.free_items.append(self.create())

    def get(self):
        """Return an item from the pool, when one is available.  This may
        cause the calling greenthread to block.
        """
        if self.free_items:
            return self.free_items.popleft()
        self.current_size += 1
        if self.current_size <= self.max_size:
            try:
                created = self.create()
            except:
                self.current_size -= 1
                raise
            return created
        self.current_size -= 1  # did not create
        return self.channel.get()

    @contextmanager
    def item(self):
        """ Get an object out of the pool, for use with the with statement.

        >>> from eventlet import pools
        >>> pool = pools.TokenPool(max_size=4)
        >>> with pool.item() as obj:
        ...     print("got token")
        ...
        got token
        >>> pool.free()
        4
        """
        obj = self.get()
        try:
            yield obj
        finally:
            self.put(obj)

    def put(self, item):
        """Put an item back into the pool, when done.  This may
        cause the putting greenthread to block.
        """
        if self.current_size > self.max_size:
            self.current_size -= 1
            return

        if self.waiting():
            try:
                self.channel.put(item, block=False)
                return
            except queue.Full:
                pass

        if self.order_as_stack:
            self.free_items.appendleft(item)
        else:
            self.free_items.append(item)

    def resize(self, new_size):
        """Resize the pool to *new_size*.

        Adjusting this number does not affect existing items checked out of
        the pool, nor any greenthreads that are waiting for an item to free
        up.  Some indeterminate number of :meth:`get`/:meth:`put`
        cycles will be necessary before the new maximum size truly matches
        the actual operation of the pool.
        """
        self.max_size = new_size

    def free(self):
        """Return the number of free items in the pool.  This corresponds
        to the number of :meth:`get` calls needed to empty the pool.
        """
        return len(self.free_items) + self.max_size - self.current_size

    def waiting(self):
        """Return the number of routines waiting for a pool item.
        """
        return max(0, self.channel.getting() - self.channel.putting())

    def create(self):
        """Generate a new pool item.  In order for the pool to
        function, either this method must be overridden in a subclass
        or the pool must be constructed with the `create` argument.
        It accepts no arguments and returns a single instance of
        whatever thing the pool is supposed to contain.

        In general, :meth:`create` is called whenever the pool exceeds its
        previous high-water mark of concurrently-checked-out-items.  In other
        words, in a new pool with *min_size* of 0, the very first call
        to :meth:`get` will result in a call to :meth:`create`.  If the first
        caller calls :meth:`put` before some other caller calls :meth:`get`,
        then the first item will be returned, and :meth:`create` will not be
        called a second time.
        """
        raise NotImplementedError("Implement in subclass")


class Token:
    pass


class TokenPool(Pool):
    """A pool which gives out tokens (opaque unique objects), which indicate
    that the coroutine which holds the token has a right to consume some
    limited resource.
    """

    def create(self):
        return Token()
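# --- Illustrative sketch (not part of the eventlet source; added for review) ---
# TokenPool is the simplest possible Pool subclass: create() just returns a
# fresh opaque object. The same three-line shape is all a custom resource
# pool needs; make_connection() below is a hypothetical factory standing in
# for a real resource constructor:
#
#   from eventlet import pools
#
#   class ConnectionPool(pools.Pool):
#       def create(self):
#           return make_connection()     # assumption: your own factory
#
#   pool = pools.TokenPool(max_size=2)   # at most 2 concurrent holders
#   with pool.item():
#       pass                             # do rate-limited work here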
490
venv/lib/python3.12/site-packages/eventlet/queue.py
Normal file
@ -0,0 +1,490 @@
# Copyright (c) 2009 Denis Bilenko, denis.bilenko at gmail com
# Copyright (c) 2010 Eventlet Contributors (see AUTHORS)
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""Synchronized queues.

The :mod:`eventlet.queue` module implements multi-producer, multi-consumer
queues that work across greenlets, with an API similar to the classes found in
the standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>`
modules.

A major difference is that queues in this module operate as channels when
initialized with *maxsize* of zero.  In such a case, both :meth:`Queue.empty`
and :meth:`Queue.full` return ``True`` and :meth:`Queue.put` always blocks until
a call to :meth:`Queue.get` retrieves the item.

An interesting difference, made possible because of greenthreads, is
that :meth:`Queue.qsize`, :meth:`Queue.empty`, and :meth:`Queue.full` *can* be
used as indicators of whether the subsequent :meth:`Queue.get`
or :meth:`Queue.put` will not block.  The new methods :meth:`Queue.getting`
and :meth:`Queue.putting` report on the number of greenthreads blocking
in :meth:`put <Queue.put>` or :meth:`get <Queue.get>` respectively.
"""

import sys
import heapq
import collections
import traceback

from eventlet.event import Event
from eventlet.greenthread import getcurrent
from eventlet.hubs import get_hub
import queue as Stdlib_Queue
from eventlet.timeout import Timeout


__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'LightQueue', 'Full', 'Empty']

_NONE = object()
Full = Stdlib_Queue.Full
Empty = Stdlib_Queue.Empty


class Waiter:
    """A low level synchronization class.

    Wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them safe:

    * switching will occur only if the waiting greenlet is currently executing
      the :meth:`wait` method.  Otherwise, :meth:`switch` and :meth:`throw` are no-ops.
    * any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw`

    The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet.
    The :meth:`wait` method must be called from a greenlet other than :class:`Hub`.
    """
    __slots__ = ['greenlet']

    def __init__(self):
        self.greenlet = None

    def __repr__(self):
        if self.waiting:
            waiting = ' waiting'
        else:
            waiting = ''
        return '<%s at %s%s greenlet=%r>' % (
            type(self).__name__, hex(id(self)), waiting, self.greenlet,
        )

    def __str__(self):
        """
        >>> print(Waiter())
        <Waiter greenlet=None>
        """
        if self.waiting:
            waiting = ' waiting'
        else:
            waiting = ''
        return '<%s%s greenlet=%s>' % (type(self).__name__, waiting, self.greenlet)

    def __nonzero__(self):
        return self.greenlet is not None

    __bool__ = __nonzero__

    @property
    def waiting(self):
        return self.greenlet is not None

    def switch(self, value=None):
        """Wake up the greenlet that is currently calling wait() (if there is one).
        Can only be called from the Hub's greenlet.
        """
        assert getcurrent() is get_hub(
        ).greenlet, "Can only use Waiter.switch method from the mainloop"
        if self.greenlet is not None:
            try:
                self.greenlet.switch(value)
            except Exception:
                traceback.print_exc()

    def throw(self, *throw_args):
        """Make the greenlet calling wait() wake up (if there is a wait()).
        Can only be called from the Hub's greenlet.
        """
        assert getcurrent() is get_hub(
        ).greenlet, "Can only use Waiter.throw method from the mainloop"
        if self.greenlet is not None:
            try:
                self.greenlet.throw(*throw_args)
            except Exception:
                traceback.print_exc()

    # XXX should be renamed to get() ? and the whole class is called Receiver?
    def wait(self):
        """Wait until switch() or throw() is called.
        """
        assert self.greenlet is None, 'This Waiter is already used by %r' % (self.greenlet, )
        self.greenlet = getcurrent()
        try:
            return get_hub().switch()
        finally:
            self.greenlet = None
||||
|
||||
|
||||
class LightQueue:
|
||||
"""
|
||||
This is a variant of Queue that behaves mostly like the standard
|
||||
:class:`Stdlib_Queue`. It differs by not supporting the
|
||||
:meth:`task_done <Stdlib_Queue.task_done>` or
|
||||
:meth:`join <Stdlib_Queue.join>` methods, and is a little faster for
|
||||
not having that overhead.
|
||||
"""
|
||||
|
||||
def __init__(self, maxsize=None):
|
||||
if maxsize is None or maxsize < 0: # None is not comparable in 3.x
|
||||
self.maxsize = None
|
||||
else:
|
||||
self.maxsize = maxsize
|
||||
self.getters = set()
|
||||
self.putters = set()
|
||||
self._event_unlock = None
|
||||
self._init(maxsize)
|
||||
|
||||
# QQQ make maxsize into a property with setter that schedules unlock if necessary
|
||||
|
||||
def _init(self, maxsize):
|
||||
self.queue = collections.deque()
|
||||
|
||||
def _get(self):
|
||||
return self.queue.popleft()
|
||||
|
||||
def _put(self, item):
|
||||
self.queue.append(item)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())
|
||||
|
||||
def __str__(self):
|
||||
return '<%s %s>' % (type(self).__name__, self._format())
|
||||
|
||||
def _format(self):
|
||||
result = 'maxsize=%r' % (self.maxsize, )
|
||||
if getattr(self, 'queue', None):
|
||||
result += ' queue=%r' % self.queue
|
||||
if self.getters:
|
||||
result += ' getters[%s]' % len(self.getters)
|
||||
if self.putters:
|
||||
result += ' putters[%s]' % len(self.putters)
|
||||
if self._event_unlock is not None:
|
||||
result += ' unlocking'
|
||||
return result
|
||||
|
||||
def qsize(self):
|
||||
"""Return the size of the queue."""
|
||||
return len(self.queue)
|
||||
|
||||
def resize(self, size):
|
||||
"""Resizes the queue's maximum size.
|
||||
|
||||
If the size is increased, and there are putters waiting, they may be woken up."""
|
||||
# None is not comparable in 3.x
|
||||
if self.maxsize is not None and (size is None or size > self.maxsize):
|
||||
# Maybe wake some stuff up
|
||||
self._schedule_unlock()
|
||||
self.maxsize = size
|
||||
|
||||
def putting(self):
|
||||
"""Returns the number of greenthreads that are blocked waiting to put
|
||||
items into the queue."""
|
||||
return len(self.putters)
|
||||
|
||||
def getting(self):
|
||||
"""Returns the number of greenthreads that are blocked waiting on an
|
||||
empty queue."""
|
||||
return len(self.getters)
|
||||
|
||||
def empty(self):
|
||||
"""Return ``True`` if the queue is empty, ``False`` otherwise."""
|
||||
return not self.qsize()
|
||||
|
||||
def full(self):
|
||||
"""Return ``True`` if the queue is full, ``False`` otherwise.
|
||||
|
||||
``Queue(None)`` is never full.
|
||||
"""
|
||||
# None is not comparable in 3.x
|
||||
return self.maxsize is not None and self.qsize() >= self.maxsize
|
||||
|
||||
def put(self, item, block=True, timeout=None):
|
||||
"""Put an item into the queue.
|
||||
|
||||
If optional arg *block* is true and *timeout* is ``None`` (the default),
|
||||
block if necessary until a free slot is available. If *timeout* is
|
||||
a positive number, it blocks at most *timeout* seconds and raises
|
||||
the :class:`Full` exception if no free slot was available within that time.
|
||||
Otherwise (*block* is false), put an item on the queue if a free slot
|
||||
is immediately available, else raise the :class:`Full` exception (*timeout*
|
||||
is ignored in that case).
|
||||
"""
|
||||
if self.maxsize is None or self.qsize() < self.maxsize:
|
||||
# there's a free slot, put an item right away
|
||||
self._put(item)
|
||||
if self.getters:
|
||||
self._schedule_unlock()
|
||||
elif not block and get_hub().greenlet is getcurrent():
|
||||
# we're in the mainloop, so we cannot wait; we can switch() to other greenlets though
|
||||
# find a getter and deliver an item to it
|
||||
while self.getters:
|
||||
getter = self.getters.pop()
|
||||
if getter:
|
||||
self._put(item)
|
||||
item = self._get()
|
||||
getter.switch(item)
|
||||
return
|
||||
raise Full
|
||||
elif block:
|
||||
waiter = ItemWaiter(item, block)
|
||||
self.putters.add(waiter)
|
||||
timeout = Timeout(timeout, Full)
|
||||
try:
|
||||
if self.getters:
|
||||
self._schedule_unlock()
|
||||
result = waiter.wait()
|
||||
assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
|
||||
if waiter.item is not _NONE:
|
||||
self._put(item)
|
||||
finally:
|
||||
timeout.cancel()
|
||||
self.putters.discard(waiter)
|
||||
elif self.getters:
|
||||
waiter = ItemWaiter(item, block)
|
||||
self.putters.add(waiter)
|
||||
self._schedule_unlock()
|
||||
result = waiter.wait()
|
||||
assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
|
||||
if waiter.item is not _NONE:
|
||||
raise Full
|
||||
else:
|
||||
raise Full
|
||||
|
||||
def put_nowait(self, item):
|
||||
"""Put an item into the queue without blocking.
|
||||
|
||||
Only enqueue the item if a free slot is immediately available.
|
||||
Otherwise raise the :class:`Full` exception.
|
||||
"""
|
||||
self.put(item, False)
|
||||
|
||||
def get(self, block=True, timeout=None):
|
||||
"""Remove and return an item from the queue.
|
||||
|
||||
If optional args *block* is true and *timeout* is ``None`` (the default),
|
||||
block if necessary until an item is available. If *timeout* is a positive number,
|
||||
it blocks at most *timeout* seconds and raises the :class:`Empty` exception
|
||||
if no item was available within that time. Otherwise (*block* is false), return
|
||||
an item if one is immediately available, else raise the :class:`Empty` exception
|
||||
(*timeout* is ignored in that case).
|
||||
"""
|
||||
if self.qsize():
|
||||
if self.putters:
|
||||
self._schedule_unlock()
|
||||
return self._get()
|
||||
elif not block and get_hub().greenlet is getcurrent():
|
||||
# special case to make get_nowait() runnable in the mainloop greenlet
|
||||
# there are no items in the queue; try to fix the situation by unlocking putters
|
||||
while self.putters:
|
||||
putter = self.putters.pop()
|
||||
if putter:
|
||||
putter.switch(putter)
|
||||
if self.qsize():
|
||||
return self._get()
|
||||
raise Empty
|
||||
elif block:
|
||||
waiter = Waiter()
|
||||
timeout = Timeout(timeout, Empty)
|
||||
try:
|
||||
self.getters.add(waiter)
|
||||
if self.putters:
|
||||
self._schedule_unlock()
|
||||
try:
|
||||
return waiter.wait()
|
||||
except:
|
||||
self._schedule_unlock()
|
||||
raise
|
||||
finally:
|
||||
self.getters.discard(waiter)
|
||||
timeout.cancel()
|
||||
else:
|
||||
raise Empty
|
||||
|
||||
def get_nowait(self):
|
||||
"""Remove and return an item from the queue without blocking.
|
||||
|
||||
Only get an item if one is immediately available. Otherwise
|
||||
raise the :class:`Empty` exception.
|
||||
"""
|
||||
return self.get(False)
|
||||
|
||||
def _unlock(self):
|
||||
try:
|
||||
while True:
|
||||
if self.qsize() and self.getters:
|
||||
getter = self.getters.pop()
|
||||
if getter:
|
||||
try:
|
||||
item = self._get()
|
||||
except:
|
||||
getter.throw(*sys.exc_info())
|
||||
else:
|
||||
getter.switch(item)
|
||||
elif self.putters and self.getters:
|
||||
putter = self.putters.pop()
|
||||
if putter:
|
||||
getter = self.getters.pop()
|
||||
if getter:
|
||||
item = putter.item
|
||||
# this makes greenlet calling put() not to call _put() again
|
||||
putter.item = _NONE
|
||||
self._put(item)
|
||||
item = self._get()
|
||||
getter.switch(item)
|
||||
putter.switch(putter)
|
||||
else:
|
||||
self.putters.add(putter)
|
||||
elif self.putters and (self.getters or
|
||||
self.maxsize is None or
|
||||
self.qsize() < self.maxsize):
|
||||
putter = self.putters.pop()
|
||||
putter.switch(putter)
|
||||
elif self.putters and not self.getters:
|
||||
full = [p for p in self.putters if not p.block]
|
||||
if not full:
|
||||
break
|
||||
for putter in full:
|
||||
self.putters.discard(putter)
|
||||
get_hub().schedule_call_global(
|
||||
0, putter.greenlet.throw, Full)
|
||||
else:
|
||||
break
|
||||
finally:
|
||||
self._event_unlock = None # QQQ maybe it's possible to obtain this info from libevent?
|
||||
# i.e. whether this event is pending _OR_ currently executing
|
||||
# testcase: 2 greenlets: while True: q.put(q.get()) - nothing else has a change to execute
|
||||
# to avoid this, schedule unlock with timer(0, ...) once in a while
|
||||
|
||||
def _schedule_unlock(self):
|
||||
if self._event_unlock is None:
|
||||
self._event_unlock = get_hub().schedule_call_global(0, self._unlock)
|
||||
|
||||
|
||||
class ItemWaiter(Waiter):
|
||||
__slots__ = ['item', 'block']
|
||||
|
||||
def __init__(self, item, block):
|
||||
Waiter.__init__(self)
|
||||
self.item = item
|
||||
self.block = block
|
||||
|
||||
|
||||
class Queue(LightQueue):
|
||||
'''Create a queue object with a given maximum size.
|
||||
|
||||
If *maxsize* is less than zero or ``None``, the queue size is infinite.
|
||||
|
||||
``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
|
||||
until the item is delivered. (This is unlike the standard
|
||||
:class:`Stdlib_Queue`, where 0 means infinite size).
|
||||
|
||||
In all other respects, this Queue class resembles the standard library,
|
||||
:class:`Stdlib_Queue`.
|
||||
'''
|
||||
|
||||
def __init__(self, maxsize=None):
|
||||
LightQueue.__init__(self, maxsize)
|
||||
self.unfinished_tasks = 0
|
||||
self._cond = Event()
|
||||
|
||||
def _format(self):
|
||||
result = LightQueue._format(self)
|
||||
if self.unfinished_tasks:
|
||||
result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
|
||||
return result
|
||||
|
||||
def _put(self, item):
|
||||
LightQueue._put(self, item)
|
||||
self._put_bookkeeping()
|
||||
|
||||
def _put_bookkeeping(self):
|
||||
self.unfinished_tasks += 1
|
||||
if self._cond.ready():
|
||||
self._cond.reset()
|
||||
|
||||
def task_done(self):
|
||||
'''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
|
||||
For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to
|
||||
:meth:`task_done` tells the queue that the processing on the task is complete.
|
||||
|
||||
If a :meth:`join` is currently blocking, it will resume when all items have been processed
|
||||
(meaning that a :meth:`task_done` call was received for every item that had been
|
||||
:meth:`put <Queue.put>` into the queue).
|
||||
|
||||
Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
|
||||
'''
|
||||
|
||||
if self.unfinished_tasks <= 0:
|
||||
raise ValueError('task_done() called too many times')
|
||||
self.unfinished_tasks -= 1
|
||||
if self.unfinished_tasks == 0:
|
||||
self._cond.send(None)
|
||||
|
||||
def join(self):
|
||||
'''Block until all items in the queue have been gotten and processed.
|
||||
|
||||
The count of unfinished tasks goes up whenever an item is added to the queue.
|
||||
The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
|
||||
that the item was retrieved and all work on it is complete. When the count of
|
||||
unfinished tasks drops to zero, :meth:`join` unblocks.
|
||||
'''
|
||||
if self.unfinished_tasks > 0:
|
||||
self._cond.wait()
|
||||
|
||||
|
||||
class PriorityQueue(Queue):
|
||||
'''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).
|
||||
|
||||
Entries are typically tuples of the form: ``(priority number, data)``.
|
||||
'''
|
||||
|
||||
def _init(self, maxsize):
|
||||
self.queue = []
|
||||
|
||||
def _put(self, item, heappush=heapq.heappush):
|
||||
heappush(self.queue, item)
|
||||
self._put_bookkeeping()
|
||||
|
||||
def _get(self, heappop=heapq.heappop):
|
||||
return heappop(self.queue)
|
||||
|
||||
|
||||
class LifoQueue(Queue):
|
||||
'''A subclass of :class:`Queue` that retrieves most recently added entries first.'''
|
||||
|
||||
def _init(self, maxsize):
|
||||
self.queue = []
|
||||
|
||||
def _put(self, item):
|
||||
self.queue.append(item)
|
||||
self._put_bookkeeping()
|
||||
|
||||
def _get(self):
|
||||
return self.queue.pop()
|
||||
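
A minimal usage sketch of the queue classes above (illustrative, not part of the vendored file; the `worker` function and item values are made up)::

    import eventlet
    from eventlet.queue import Queue

    q = Queue(maxsize=2)   # Queue(0) would act as a channel: put() blocks until a get()

    def worker():
        while True:
            item = q.get()         # blocks until an item is available
            try:
                print('processing', item)
            finally:
                q.task_done()      # one task_done() per get(), or join() never unblocks

    eventlet.spawn(worker)
    for i in range(5):
        q.put(i)                   # blocks while the queue already holds maxsize items
    q.join()                       # returns once every item was fetched and task_done()'d
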
315
venv/lib/python3.12/site-packages/eventlet/semaphore.py
Normal file
@ -0,0 +1,315 @@
import collections

import eventlet
from eventlet import hubs


class Semaphore:

    """An unbounded semaphore.
    Optionally initialize with a resource *count*, then :meth:`acquire` and
    :meth:`release` resources as needed. Attempting to :meth:`acquire` when
    *count* is zero suspends the calling greenthread until *count* becomes
    nonzero again.

    This is API-compatible with :class:`threading.Semaphore`.

    It is a context manager, and thus can be used in a with block::

      sem = Semaphore(2)
      with sem:
        do_some_stuff()

    If not specified, *value* defaults to 1.

    It is possible to limit acquire time::

      sem = Semaphore()
      ok = sem.acquire(timeout=0.1)
      # True if acquired, False if timed out.

    """

    def __init__(self, value=1):
        try:
            value = int(value)
        except ValueError as e:
            msg = 'Semaphore() expect value :: int, actual: {} {}'.format(type(value), str(e))
            raise TypeError(msg)
        if value < 0:
            msg = 'Semaphore() expect value >= 0, actual: {}'.format(repr(value))
            raise ValueError(msg)
        self.counter = value
        self._waiters = collections.deque()

    def __repr__(self):
        params = (self.__class__.__name__, hex(id(self)),
                  self.counter, len(self._waiters))
        return '<%s at %s c=%s _w[%s]>' % params

    def __str__(self):
        params = (self.__class__.__name__, self.counter, len(self._waiters))
        return '<%s c=%s _w[%s]>' % params

    def locked(self):
        """Returns true if a call to acquire would block.
        """
        return self.counter <= 0

    def bounded(self):
        """Returns False; for consistency with
        :class:`~eventlet.semaphore.CappedSemaphore`.
        """
        return False

    def acquire(self, blocking=True, timeout=None):
        """Acquire a semaphore.

        When invoked without arguments: if the internal counter is larger than
        zero on entry, decrement it by one and return immediately. If it is zero
        on entry, block, waiting until some other thread has called release() to
        make it larger than zero. This is done with proper interlocking so that
        if multiple acquire() calls are blocked, release() will wake exactly one
        of them up. The implementation may pick one at random, so the order in
        which blocked threads are awakened should not be relied on. There is no
        return value in this case.

        When invoked with blocking set to true, do the same thing as when called
        without arguments, and return true.

        When invoked with blocking set to false, do not block. If a call without
        an argument would block, return false immediately; otherwise, do the
        same thing as when called without arguments, and return true.

        Timeout value must be strictly positive.
        """
        if timeout == -1:
            timeout = None
        if timeout is not None and timeout < 0:
            raise ValueError("timeout value must be strictly positive")
        if not blocking:
            if timeout is not None:
                raise ValueError("can't specify timeout for non-blocking acquire")
            timeout = 0
        if not blocking and self.locked():
            return False

        current_thread = eventlet.getcurrent()

        if self.counter <= 0 or self._waiters:
            if current_thread not in self._waiters:
                self._waiters.append(current_thread)
            try:
                if timeout is not None:
                    ok = False
                    with eventlet.Timeout(timeout, False):
                        while self.counter <= 0:
                            hubs.get_hub().switch()
                        ok = True
                    if not ok:
                        return False
                else:
                    # If someone else is already in this wait loop, give them
                    # a chance to get out.
                    while True:
                        hubs.get_hub().switch()
                        if self.counter > 0:
                            break
            finally:
                try:
                    self._waiters.remove(current_thread)
                except ValueError:
                    # Fine if it's already been dropped.
                    pass

        self.counter -= 1
        return True

    def __enter__(self):
        self.acquire()

    def release(self, blocking=True):
        """Release a semaphore, incrementing the internal counter by one. When
        it was zero on entry and another thread is waiting for it to become
        larger than zero again, wake up that thread.

        The *blocking* argument is for consistency with CappedSemaphore and is
        ignored.
        """
        self.counter += 1
        if self._waiters:
            hubs.get_hub().schedule_call_global(0, self._do_acquire)
        return True

    def _do_acquire(self):
        if self._waiters and self.counter > 0:
            waiter = self._waiters.popleft()
            waiter.switch()

    def __exit__(self, typ, val, tb):
        self.release()

    @property
    def balance(self):
        """An integer value that represents how many new calls to
        :meth:`acquire` or :meth:`release` would be needed to get the counter to
        0. If it is positive, then its value is the number of acquires that can
        happen before the next acquire would block. If it is negative, it is
        the negative of the number of releases that would be required in order
        to make the counter 0 again (one more release would push the counter to
        1 and unblock acquirers). It takes into account how many greenthreads
        are currently blocking in :meth:`acquire`.
        """
        # positive means there are free items
        # zero means there are no free items but nobody has requested one
        # negative means there are requests for items, but no items
        return self.counter - len(self._waiters)


class BoundedSemaphore(Semaphore):

    """A bounded semaphore checks to make sure its current value doesn't exceed
    its initial value. If it does, ValueError is raised. In most situations
    semaphores are used to guard resources with limited capacity. If the
    semaphore is released too many times it's a sign of a bug. If not given,
    *value* defaults to 1.
    """

    def __init__(self, value=1):
        super().__init__(value)
        self.original_counter = value

    def release(self, blocking=True):
        """Release a semaphore, incrementing the internal counter by one. If
        the counter would exceed the initial value, raises ValueError. When
        it was zero on entry and another thread is waiting for it to become
        larger than zero again, wake up that thread.

        The *blocking* argument is for consistency with :class:`CappedSemaphore`
        and is ignored.
        """
        if self.counter >= self.original_counter:
            raise ValueError("Semaphore released too many times")
        return super().release(blocking)


class CappedSemaphore:

    """A blockingly bounded semaphore.

    Optionally initialize with a resource *count*, then :meth:`acquire` and
    :meth:`release` resources as needed. Attempting to :meth:`acquire` when
    *count* is zero suspends the calling greenthread until count becomes nonzero
    again. Attempting to :meth:`release` after *count* has reached *limit*
    suspends the calling greenthread until *count* becomes less than *limit*
    again.

    This has the same API as :class:`threading.Semaphore`, though its
    semantics and behavior differ subtly due to the upper limit on calls
    to :meth:`release`. It is **not** compatible with
    :class:`threading.BoundedSemaphore` because it blocks when reaching *limit*
    instead of raising a ValueError.

    It is a context manager, and thus can be used in a with block::

      sem = CappedSemaphore(2, 10)
      with sem:
        do_some_stuff()
    """

    def __init__(self, count, limit):
        if count < 0:
            raise ValueError("CappedSemaphore must be initialized with a "
                             "positive number, got %s" % count)
        if count > limit:
            # accidentally, this also catches the case when limit is None
            raise ValueError("'count' cannot be more than 'limit'")
        self.lower_bound = Semaphore(count)
        self.upper_bound = Semaphore(limit - count)

    def __repr__(self):
        params = (self.__class__.__name__, hex(id(self)),
                  self.balance, self.lower_bound, self.upper_bound)
        return '<%s at %s b=%s l=%s u=%s>' % params

    def __str__(self):
        params = (self.__class__.__name__, self.balance,
                  self.lower_bound, self.upper_bound)
        return '<%s b=%s l=%s u=%s>' % params

    def locked(self):
        """Returns true if a call to acquire would block.
        """
        return self.lower_bound.locked()

    def bounded(self):
        """Returns true if a call to release would block.
        """
        return self.upper_bound.locked()

    def acquire(self, blocking=True):
        """Acquire a semaphore.

        When invoked without arguments: if the internal counter is larger than
        zero on entry, decrement it by one and return immediately. If it is zero
        on entry, block, waiting until some other thread has called release() to
        make it larger than zero. This is done with proper interlocking so that
        if multiple acquire() calls are blocked, release() will wake exactly one
        of them up. The implementation may pick one at random, so the order in
        which blocked threads are awakened should not be relied on. There is no
        return value in this case.

        When invoked with blocking set to true, do the same thing as when called
        without arguments, and return true.

        When invoked with blocking set to false, do not block. If a call without
        an argument would block, return false immediately; otherwise, do the
        same thing as when called without arguments, and return true.
        """
        if not blocking and self.locked():
            return False
        self.upper_bound.release()
        try:
            return self.lower_bound.acquire()
        except:
            self.upper_bound.counter -= 1
            # using counter directly means that it can be less than zero.
            # however I certainly don't need to wait here and I don't seem to have
            # a need to care about such inconsistency
            raise

    def __enter__(self):
        self.acquire()

    def release(self, blocking=True):
        """Release a semaphore. In this class, this behaves very much like
        an :meth:`acquire` but in the opposite direction.

        Imagine the docs of :meth:`acquire` here, but with every direction
        reversed. When calling this method, it will block if the internal
        counter is greater than or equal to *limit*.
        """
        if not blocking and self.bounded():
            return False
        self.lower_bound.release()
        try:
            return self.upper_bound.acquire()
        except:
            self.lower_bound.counter -= 1
            raise

    def __exit__(self, typ, val, tb):
        self.release()

    @property
    def balance(self):
        """An integer value that represents how many new calls to
        :meth:`acquire` or :meth:`release` would be needed to get the counter to
        0. If it is positive, then its value is the number of acquires that can
        happen before the next acquire would block. If it is negative, it is
        the negative of the number of releases that would be required in order
        to make the counter 0 again (one more release would push the counter to
        1 and unblock acquirers). It takes into account how many greenthreads
        are currently blocking in :meth:`acquire` and :meth:`release`.
        """
        return self.lower_bound.balance - self.upper_bound.balance
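
A short sketch of the three semaphore variants above (illustrative only)::

    from eventlet.semaphore import Semaphore, BoundedSemaphore, CappedSemaphore

    sem = Semaphore(2)
    with sem:                      # acquire() on entry, release() on exit
        pass
    ok = sem.acquire(timeout=0.5)  # False if no slot frees up within 0.5s
    if ok:
        sem.release()

    bsem = BoundedSemaphore(1)
    bsem.acquire()
    bsem.release()
    # bsem.release()               # would raise ValueError: released too many times

    csem = CappedSemaphore(1, 2)
    csem.acquire()                 # blocks when count is exhausted
    csem.release()                 # blocks (instead of raising) once count reaches limit
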
@ -0,0 +1,69 @@
import inspect
import functools
import sys
import warnings

from eventlet.support import greenlets


_MISSING = object()


def get_errno(exc):
    """ Get the error code out of socket.error objects.
    socket.error in <2.5 does not have errno attribute
    socket.error in 3.x does not allow indexing access
    e.args[0] works for all.
    There are cases when args[0] is not errno.
    i.e. http://bugs.python.org/issue6471
    Maybe there are cases when errno is set, but it is not the first argument?
    """

    try:
        if exc.errno is not None:
            return exc.errno
    except AttributeError:
        pass
    try:
        return exc.args[0]
    except IndexError:
        return None


if sys.version_info[0] < 3:
    def bytes_to_str(b, encoding='ascii'):
        return b
else:
    def bytes_to_str(b, encoding='ascii'):
        return b.decode(encoding)

PY33 = sys.version_info[:2] == (3, 3)


def wrap_deprecated(old, new):
    def _resolve(s):
        return 'eventlet.'+s if '.' not in s else s
    msg = '''\
{old} is deprecated and will be removed in next version. Use {new} instead.
Autoupgrade: fgrep -rl '{old}' . |xargs -t sed --in-place='' -e 's/{old}/{new}/'
'''.format(old=_resolve(old), new=_resolve(new))

    def wrapper(base):
        klass = None
        if inspect.isclass(base):
            class klass(base):
                pass
            klass.__name__ = base.__name__
            klass.__module__ = base.__module__

        @functools.wraps(base)
        def wrapped(*a, **kw):
            warnings.warn(msg, DeprecationWarning, stacklevel=5)
            return base(*a, **kw)

        if klass is not None:
            klass.__init__ = wrapped
            return klass

        return wrapped
    return wrapper
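
A hypothetical use of `wrap_deprecated` (the `old_sleep` alias is invented for illustration)::

    from eventlet import greenthread
    from eventlet.support import wrap_deprecated

    # Calling old_sleep() emits a DeprecationWarning naming the new location,
    # then delegates to greenthread.sleep.
    old_sleep = wrap_deprecated('old_sleep', 'greenthread.sleep')(greenthread.sleep)
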
959
venv/lib/python3.12/site-packages/eventlet/support/greendns.py
Normal file
@ -0,0 +1,959 @@
'''greendns - non-blocking DNS support for Eventlet
'''

# Portions of this code taken from the gogreen project:
#   http://github.com/slideinc/gogreen
#
# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above
#     copyright notice, this list of conditions and the following
#     disclaimer in the documentation and/or other materials provided
#     with the distribution.
#   * Neither the name of the author nor the names of other
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import struct
import sys

import eventlet
from eventlet import patcher
from eventlet.green import _socket_nodns
from eventlet.green import os
from eventlet.green import time
from eventlet.green import select
from eventlet.green import ssl


def import_patched(module_name):
    # Import cycle note: it's crucial to use _socket_nodns here because
    # regular eventlet.green.socket imports *this* module and if we imported
    # it back we'd end up with an import cycle (socket -> greendns -> socket).
    # We break this import cycle by providing a restricted socket module.
    modules = {
        'select': select,
        'time': time,
        'os': os,
        'socket': _socket_nodns,
        'ssl': ssl,
    }
    return patcher.import_patched(module_name, **modules)


dns = import_patched('dns')

# Handle rdtypes separately; we need it fully available as we patch the rest
dns.rdtypes = import_patched('dns.rdtypes')
dns.rdtypes.__all__.extend(['dnskeybase', 'dsbase', 'txtbase'])
for pkg in dns.rdtypes.__all__:
    setattr(dns.rdtypes, pkg, import_patched('dns.rdtypes.' + pkg))
for pkg in dns.rdtypes.IN.__all__:
    setattr(dns.rdtypes.IN, pkg, import_patched('dns.rdtypes.IN.' + pkg))
for pkg in dns.rdtypes.ANY.__all__:
    setattr(dns.rdtypes.ANY, pkg, import_patched('dns.rdtypes.ANY.' + pkg))

for pkg in dns.__all__:
    if pkg == 'rdtypes':
        continue
    setattr(dns, pkg, import_patched('dns.' + pkg))
del import_patched
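# Illustrative note (not part of the upstream file): each import_patched() call
# above is conceptually equivalent to, e.g.,
#
#     dns.resolver = patcher.import_patched(
#         'dns.resolver',
#         socket=_socket_nodns, select=select, time=time, os=os, ssl=ssl)
#
# so blocking calls inside dnspython yield to the hub instead of stalling the
# whole process.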
socket = _socket_nodns

DNS_QUERY_TIMEOUT = 10.0
HOSTS_TTL = 10.0

# NOTE(victor): do not use EAI_*_ERROR instances for raising errors in python3, which will cause a memory leak.
EAI_EAGAIN_ERROR = socket.gaierror(socket.EAI_AGAIN, 'Lookup timed out')
EAI_NONAME_ERROR = socket.gaierror(socket.EAI_NONAME, 'Name or service not known')
# EAI_NODATA was removed from RFC3493, it's now replaced with EAI_NONAME
# socket.EAI_NODATA is not defined on FreeBSD, probably on some other platforms too.
# https://lists.freebsd.org/pipermail/freebsd-ports/2003-October/005757.html
EAI_NODATA_ERROR = EAI_NONAME_ERROR
if (os.environ.get('EVENTLET_DEPRECATED_EAI_NODATA', '').lower() in ('1', 'y', 'yes')
        and hasattr(socket, 'EAI_NODATA')):
    EAI_NODATA_ERROR = socket.gaierror(socket.EAI_NODATA, 'No address associated with hostname')


def _raise_new_error(error_instance):
    raise error_instance.__class__(*error_instance.args)


def is_ipv4_addr(host):
    """Return True if host is a valid IPv4 address"""
    if not isinstance(host, str):
        return False
    try:
        dns.ipv4.inet_aton(host)
    except dns.exception.SyntaxError:
        return False
    else:
        return True


def is_ipv6_addr(host):
    """Return True if host is a valid IPv6 address"""
    if not isinstance(host, str):
        return False
    host = host.split('%', 1)[0]
    try:
        dns.ipv6.inet_aton(host)
    except dns.exception.SyntaxError:
        return False
    else:
        return True


def is_ip_addr(host):
    """Return True if host is a valid IPv4 or IPv6 address"""
    return is_ipv4_addr(host) or is_ipv6_addr(host)


# NOTE(ralonsoh): in dnspython v2.0.0, "_compute_expiration" was replaced
# by "_compute_times".
if hasattr(dns.query, '_compute_expiration'):
    def compute_expiration(query, timeout):
        return query._compute_expiration(timeout)
else:
    def compute_expiration(query, timeout):
        return query._compute_times(timeout)[1]


class HostsAnswer(dns.resolver.Answer):
    """Answer class for HostsResolver object"""

    def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True):
        """Create a new answer

        :qname: A dns.name.Name instance of the query name
        :rdtype: The rdatatype of the query
        :rdclass: The rdataclass of the query
        :rrset: The dns.rrset.RRset with the response, must have ttl attribute
        :raise_on_no_answer: Whether to raise dns.resolver.NoAnswer if no
            answer.
        """
        self.response = None
        self.qname = qname
        self.rdtype = rdtype
        self.rdclass = rdclass
        self.canonical_name = qname
        if not rrset and raise_on_no_answer:
            raise dns.resolver.NoAnswer()
        self.rrset = rrset
        self.expiration = (time.time() +
                           rrset.ttl if hasattr(rrset, 'ttl') else 0)


class HostsResolver:
    """Class to parse the hosts file

    Attributes
    ----------

    :fname: The filename of the hosts file in use.
    :interval: The time between checking for hosts file modification
    """

    LINES_RE = re.compile(r"""
        \s*  # Leading space
        ([^\r\n#]*?)  # The actual match, non-greedy so as not to include trailing space
        \s*  # Trailing space
        (?:[#][^\r\n]+)?  # Comments
        (?:$|[\r\n]+)  # EOF or newline
    """, re.VERBOSE)

    def __init__(self, fname=None, interval=HOSTS_TTL):
        self._v4 = {}           # name -> ipv4
        self._v6 = {}           # name -> ipv6
        self._aliases = {}      # name -> canonical_name
        self.interval = interval
        self.fname = fname
        if fname is None:
            if os.name == 'posix':
                self.fname = '/etc/hosts'
            elif os.name == 'nt':
                self.fname = os.path.expandvars(
                    r'%SystemRoot%\system32\drivers\etc\hosts')
        self._last_load = 0
        if self.fname:
            self._load()

    def _readlines(self):
        """Read the contents of the hosts file

        Return list of lines, comment lines and empty lines are
        excluded.

        Note that this performs disk I/O so can be blocking.
        """
        try:
            with open(self.fname, 'rb') as fp:
                fdata = fp.read()
        except OSError:
            return []

        udata = fdata.decode(errors='ignore')

        return filter(None, self.LINES_RE.findall(udata))

    def _load(self):
        """Load hosts file

        This will unconditionally (re)load the data from the hosts
        file.
        """
        lines = self._readlines()
        self._v4.clear()
        self._v6.clear()
        self._aliases.clear()
        for line in lines:
            parts = line.split()
            if len(parts) < 2:
                continue
            ip = parts.pop(0)
            if is_ipv4_addr(ip):
                ipmap = self._v4
            elif is_ipv6_addr(ip):
                if ip.startswith('fe80'):
                    # Do not use link-local addresses, OSX stores these here
                    continue
                ipmap = self._v6
            else:
                continue
            cname = parts.pop(0).lower()
            ipmap[cname] = ip
            for alias in parts:
                alias = alias.lower()
                ipmap[alias] = ip
                self._aliases[alias] = cname
        self._last_load = time.time()

    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True):
        """Query the hosts file

        The known rdtypes are dns.rdatatype.A, dns.rdatatype.AAAA and
        dns.rdatatype.CNAME.

        The ``rdclass`` parameter must be dns.rdataclass.IN while the
        ``tcp`` and ``source`` parameters are ignored.

        Return a HostsAnswer instance or raise a dns.resolver.NoAnswer
        exception.
        """
        now = time.time()
        if self._last_load + self.interval < now:
            self._load()
        rdclass = dns.rdataclass.IN
        if isinstance(qname, str):
            name = qname
            qname = dns.name.from_text(qname)
        elif isinstance(qname, bytes):
            name = qname.decode("ascii")
            qname = dns.name.from_text(qname)
        else:
            name = str(qname)
        name = name.lower()
        rrset = dns.rrset.RRset(qname, rdclass, rdtype)
        rrset.ttl = self._last_load + self.interval - now
        if rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.A:
            addr = self._v4.get(name)
            if not addr and qname.is_absolute():
                addr = self._v4.get(name[:-1])
            if addr:
                rrset.add(dns.rdtypes.IN.A.A(rdclass, rdtype, addr))
        elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.AAAA:
            addr = self._v6.get(name)
            if not addr and qname.is_absolute():
                addr = self._v6.get(name[:-1])
            if addr:
                rrset.add(dns.rdtypes.IN.AAAA.AAAA(rdclass, rdtype, addr))
        elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.CNAME:
            cname = self._aliases.get(name)
            if not cname and qname.is_absolute():
                cname = self._aliases.get(name[:-1])
            if cname:
                rrset.add(dns.rdtypes.ANY.CNAME.CNAME(
                    rdclass, rdtype, dns.name.from_text(cname)))
        return HostsAnswer(qname, rdtype, rdclass, rrset, raise_on_no_answer)

    def getaliases(self, hostname):
        """Return a list of all the aliases of a given cname"""
        # Due to the way we store aliases this is a bit inefficient; it
        # clearly was an afterthought. But this is only used by
        # gethostbyname_ex so it's probably fine.
        aliases = []
        if hostname in self._aliases:
            cannon = self._aliases[hostname]
        else:
            cannon = hostname
        aliases.append(cannon)
        for alias, cname in self._aliases.items():
            if cannon == cname:
                aliases.append(alias)
        aliases.remove(hostname)
        return aliases


class ResolverProxy:
    """Resolver class which can also use /etc/hosts

    Initialise with a HostsResolver instance in order for it to also
    use the hosts file.
    """

    def __init__(self, hosts_resolver=None, filename='/etc/resolv.conf'):
        """Initialise the resolver proxy

        :param hosts_resolver: An instance of HostsResolver to use.

        :param filename: The filename containing the resolver
           configuration.  The default value is correct for both UNIX
           and Windows, on Windows it will result in the configuration
           being read from the Windows registry.
        """
        self._hosts = hosts_resolver
        self._filename = filename
        # NOTE(dtantsur): we cannot create a resolver here since this code is
        # executed on eventlet import. In an environment without DNS, creating
        # a Resolver will fail making eventlet unusable at all. See
        # https://github.com/eventlet/eventlet/issues/736 for details.
        self._cached_resolver = None

    @property
    def _resolver(self):
        if self._cached_resolver is None:
            self.clear()
        return self._cached_resolver

    @_resolver.setter
    def _resolver(self, value):
        self._cached_resolver = value

    def clear(self):
        self._resolver = dns.resolver.Resolver(filename=self._filename)
        self._resolver.cache = dns.resolver.LRUCache()

    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True,
              _hosts_rdtypes=(dns.rdatatype.A, dns.rdatatype.AAAA),
              use_network=True):
        """Query the resolver, using /etc/hosts if enabled.

        Behavior:
        1. if hosts is enabled and contains answer, return it now
        2. query nameservers for qname if use_network is True
        3. if qname did not contain dots, pretend it was top-level domain,
           query "foobar." and append to previous result
        """
        result = [None, None, 0]

        if qname is None:
            qname = '0.0.0.0'
        if isinstance(qname, str) or isinstance(qname, bytes):
            qname = dns.name.from_text(qname, None)

        def step(fun, *args, **kwargs):
            try:
                a = fun(*args, **kwargs)
            except Exception as e:
                result[1] = e
                return False
            if a.rrset is not None and len(a.rrset):
                if result[0] is None:
                    result[0] = a
                else:
                    result[0].rrset.union_update(a.rrset)
                result[2] += len(a.rrset)
            return True

        def end():
            if result[0] is not None:
                if raise_on_no_answer and result[2] == 0:
                    raise dns.resolver.NoAnswer
                return result[0]
            if result[1] is not None:
                if raise_on_no_answer or not isinstance(result[1], dns.resolver.NoAnswer):
                    raise result[1]
            raise dns.resolver.NXDOMAIN(qnames=(qname,))

        if (self._hosts and (rdclass == dns.rdataclass.IN) and (rdtype in _hosts_rdtypes)):
            if step(self._hosts.query, qname, rdtype, raise_on_no_answer=False):
                if (result[0] is not None) or (result[1] is not None) or (not use_network):
                    return end()

        # Main query
        step(self._resolver.query, qname, rdtype, rdclass, tcp, source, raise_on_no_answer=False)

        # `resolv.conf` docs say unqualified names must resolve from search (or local) domain.
        # However, common OS `getaddrinfo()` implementations append trailing dot (e.g. `db -> db.`)
        # and ask nameservers, as if top-level domain was queried.
        # This step follows established practice.
        # https://github.com/nameko/nameko/issues/392
        # https://github.com/eventlet/eventlet/issues/363
        if len(qname) == 1:
            step(self._resolver.query, qname.concatenate(dns.name.root),
                 rdtype, rdclass, tcp, source, raise_on_no_answer=False)

        return end()

    def getaliases(self, hostname):
        """Return a list of all the aliases of a given hostname"""
        if self._hosts:
            aliases = self._hosts.getaliases(hostname)
        else:
            aliases = []
        while True:
            try:
                ans = self._resolver.query(hostname, dns.rdatatype.CNAME)
            except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
                break
            else:
                aliases.extend(str(rr.target) for rr in ans.rrset)
                hostname = ans[0].target
        return aliases


resolver = ResolverProxy(hosts_resolver=HostsResolver())


def resolve(name, family=socket.AF_INET, raises=True, _proxy=None,
            use_network=True):
    """Resolve a name for a given family using the global resolver proxy.

    This method is called by the global getaddrinfo() function. If use_network
    is False, only resolution via hosts file will be performed.

    Return a dns.resolver.Answer instance.  If there is no answer its
    rrset will be empty.
    """
    if family == socket.AF_INET:
        rdtype = dns.rdatatype.A
    elif family == socket.AF_INET6:
        rdtype = dns.rdatatype.AAAA
    else:
        raise socket.gaierror(socket.EAI_FAMILY,
                              'Address family not supported')

    if _proxy is None:
        _proxy = resolver
    try:
        try:
            return _proxy.query(name, rdtype, raise_on_no_answer=raises,
                                use_network=use_network)
        except dns.resolver.NXDOMAIN:
            if not raises:
                return HostsAnswer(dns.name.Name(name),
                                   rdtype, dns.rdataclass.IN, None, False)
            raise
    except dns.exception.Timeout:
        _raise_new_error(EAI_EAGAIN_ERROR)
    except dns.exception.DNSException:
        _raise_new_error(EAI_NODATA_ERROR)


def resolve_cname(host):
    """Return the canonical name of a hostname"""
    try:
        ans = resolver.query(host, dns.rdatatype.CNAME)
    except dns.resolver.NoAnswer:
        return host
    except dns.exception.Timeout:
        _raise_new_error(EAI_EAGAIN_ERROR)
    except dns.exception.DNSException:
        _raise_new_error(EAI_NODATA_ERROR)
    else:
        return str(ans[0].target)


def getaliases(host):
    """Return a list of aliases for the given hostname

    This method translates the dnspython exceptions into
    socket.gaierror exceptions. If no aliases are available an empty
    list will be returned.
    """
    try:
        return resolver.getaliases(host)
    except dns.exception.Timeout:
        _raise_new_error(EAI_EAGAIN_ERROR)
    except dns.exception.DNSException:
        _raise_new_error(EAI_NODATA_ERROR)


def _getaddrinfo_lookup(host, family, flags):
    """Resolve a hostname to a list of addresses

    Helper function for getaddrinfo.
    """
    if flags & socket.AI_NUMERICHOST:
        _raise_new_error(EAI_NONAME_ERROR)
    addrs = []
    if family == socket.AF_UNSPEC:
        err = None
        for use_network in [False, True]:
            for qfamily in [socket.AF_INET6, socket.AF_INET]:
                try:
                    answer = resolve(host, qfamily, False, use_network=use_network)
                except socket.gaierror as e:
                    if e.errno not in (socket.EAI_AGAIN, EAI_NONAME_ERROR.errno, EAI_NODATA_ERROR.errno):
                        raise
                    err = e
                else:
                    if answer.rrset:
                        addrs.extend(rr.address for rr in answer.rrset)
            if addrs:
                break
        if err is not None and not addrs:
            raise err
    elif family == socket.AF_INET6 and flags & socket.AI_V4MAPPED:
        answer = resolve(host, socket.AF_INET6, False)
        if answer.rrset:
            addrs = [rr.address for rr in answer.rrset]
        if not addrs or flags & socket.AI_ALL:
            answer = resolve(host, socket.AF_INET, False)
            if answer.rrset:
                addrs = ['::ffff:' + rr.address for rr in answer.rrset]
    else:
        answer = resolve(host, family, False)
        if answer.rrset:
            addrs = [rr.address for rr in answer.rrset]
    return str(answer.qname), addrs


def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
    """Replacement for Python's socket.getaddrinfo

    This does the A and AAAA lookups asynchronously after which it
    calls the OS' getaddrinfo(3) using the AI_NUMERICHOST flag.  This
    flag ensures getaddrinfo(3) does not use the network itself and
    allows us to respect all the other arguments like the native OS.
    """
    if isinstance(host, str):
        host = host.encode('idna').decode('ascii')
    elif isinstance(host, bytes):
        host = host.decode("ascii")
    if host is not None and not is_ip_addr(host):
        qname, addrs = _getaddrinfo_lookup(host, family, flags)
    else:
        qname = host
        addrs = [host]
    aiflags = (flags | socket.AI_NUMERICHOST) & (0xffff ^ socket.AI_CANONNAME)
    res = []
    err = None
    for addr in addrs:
        try:
            ai = socket.getaddrinfo(addr, port, family,
                                    type, proto, aiflags)
        except OSError as e:
            if flags & socket.AI_ADDRCONFIG:
                err = e
                continue
            raise
        res.extend(ai)
    if not res:
        if err:
            raise err
        raise socket.gaierror(socket.EAI_NONAME, 'No address found')
    if flags & socket.AI_CANONNAME:
        if not is_ip_addr(qname):
            qname = resolve_cname(qname).encode('ascii').decode('idna')
        ai = res[0]
        res[0] = (ai[0], ai[1], ai[2], qname, ai[4])
    return res


def gethostbyname(hostname):
    """Replacement for Python's socket.gethostbyname"""
    if is_ipv4_addr(hostname):
        return hostname
    rrset = resolve(hostname)
    return rrset[0].address


def gethostbyname_ex(hostname):
    """Replacement for Python's socket.gethostbyname_ex"""
    if is_ipv4_addr(hostname):
        return (hostname, [], [hostname])
    ans = resolve(hostname)
    aliases = getaliases(hostname)
    addrs = [rr.address for rr in ans.rrset]
    qname = str(ans.qname)
    if qname[-1] == '.':
        qname = qname[:-1]
    return (qname, aliases, addrs)


def getnameinfo(sockaddr, flags):
    """Replacement for Python's socket.getnameinfo.

    Currently only supports IPv4.
    """
    try:
        host, port = sockaddr
    except (ValueError, TypeError):
        if not isinstance(sockaddr, tuple):
            del sockaddr  # to pass a stdlib test that is
            # hyper-careful about reference counts
            raise TypeError('getnameinfo() argument 1 must be a tuple')
        else:
            # must be ipv6 sockaddr, pretending we don't know how to resolve it
            _raise_new_error(EAI_NONAME_ERROR)

    if (flags & socket.NI_NAMEREQD) and (flags & socket.NI_NUMERICHOST):
        # Conflicting flags.  Punt.
        _raise_new_error(EAI_NONAME_ERROR)

    if is_ipv4_addr(host):
        try:
            rrset = resolver.query(
                dns.reversename.from_address(host), dns.rdatatype.PTR)
            if len(rrset) > 1:
                raise OSError('sockaddr resolved to multiple addresses')
            host = rrset[0].target.to_text(omit_final_dot=True)
        except dns.exception.Timeout:
            if flags & socket.NI_NAMEREQD:
                _raise_new_error(EAI_EAGAIN_ERROR)
        except dns.exception.DNSException:
            if flags & socket.NI_NAMEREQD:
                _raise_new_error(EAI_NONAME_ERROR)
    else:
        try:
            rrset = resolver.query(host)
            if len(rrset) > 1:
                raise OSError('sockaddr resolved to multiple addresses')
            if flags & socket.NI_NUMERICHOST:
                host = rrset[0].address
        except dns.exception.Timeout:
            _raise_new_error(EAI_EAGAIN_ERROR)
        except dns.exception.DNSException:
            raise socket.gaierror(
                (socket.EAI_NODATA, 'No address associated with hostname'))

    if not (flags & socket.NI_NUMERICSERV):
        proto = (flags & socket.NI_DGRAM) and 'udp' or 'tcp'
        port = socket.getservbyport(port, proto)

    return (host, port)


def _net_read(sock, count, expiration):
    """coro friendly replacement for dns.query._net_read
    Read the specified number of bytes from sock.  Keep trying until we
    either get the desired amount, or we hit EOF.
    A Timeout exception will be raised if the operation is not completed
    by the expiration time.
    """
    s = bytearray()
    while count > 0:
        try:
            n = sock.recv(count)
        except socket.timeout:
            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
            if expiration - time.time() <= 0.0:
                raise dns.exception.Timeout
            eventlet.sleep(0.01)
            continue
        if n == b'':
            raise EOFError
        count = count - len(n)
        s += n
    return s


def _net_write(sock, data, expiration):
    """coro friendly replacement for dns.query._net_write
    Write the specified data to the socket.
    A Timeout exception will be raised if the operation is not completed
    by the expiration time.
    """
    current = 0
    l = len(data)
    while current < l:
        try:
            current += sock.send(data[current:])
        except socket.timeout:
            # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
            if expiration - time.time() <= 0.0:
                raise dns.exception.Timeout


# Test if raise_on_truncation is an argument we should handle.
# It was newly added in dnspython 2.0
try:
    dns.message.from_wire("", raise_on_truncation=True)
except dns.message.ShortHeader:
    _handle_raise_on_truncation = True
except TypeError:
    # Argument error, there is no argument "raise_on_truncation"
    _handle_raise_on_truncation = False


def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
        af=None, source=None, source_port=0, ignore_unexpected=False,
        one_rr_per_rrset=False, ignore_trailing=False,
        raise_on_truncation=False, sock=None, ignore_errors=False):
    """coro friendly replacement for dns.query.udp
    Return the response obtained after sending a query via UDP.

    @param q: the query
    @type q: dns.message.Message
    @param where: where to send the message
    @type where: string containing an IPv4 or IPv6 address
    @param timeout: The number of seconds to wait before the query times out.
    If None, the default, wait forever.
    @type timeout: float
    @param port: The port to which to send the message.  The default is 53.
    @type port: int
    @param af: the address family to use.  The default is None, which
    causes the address family to be inferred from the form of where.
    If the inference attempt fails, AF_INET is used.
    @type af: int
    @rtype: dns.message.Message object
    @param source: source address.  The default is the IPv4 wildcard address.
    @type source: string
    @param source_port: The port from which to send the message.
    The default is 0.
    @type source_port: int
    @param ignore_unexpected: If True, ignore responses from unexpected
    sources.  The default is False.
    @type ignore_unexpected: bool
    @param one_rr_per_rrset: If True, put each RR into its own
    RRset.
    @type one_rr_per_rrset: bool
    @param ignore_trailing: If True, ignore trailing
    junk at end of the received message.
    @type ignore_trailing: bool
    @param raise_on_truncation: If True, raise an exception if
    the TC bit is set.
    @type raise_on_truncation: bool
    @param sock: the socket to use for the
    query.  If None, the default, a socket is created.  Note that
    if a socket is provided, it must be a nonblocking datagram socket,
    and the source and source_port are ignored.
    @type sock: socket.socket | None
    @param ignore_errors: if various format errors or response mismatches occur,
    continue listening.
    @type ignore_errors: bool"""

    wire = q.to_wire()
    if af is None:
        try:
            af = dns.inet.af_for_address(where)
        except:
            af = dns.inet.AF_INET
    if af == dns.inet.AF_INET:
        destination = (where, port)
        if source is not None:
            source = (source, source_port)
    elif af == dns.inet.AF_INET6:
        # Purge any stray zeroes in source address. When doing the tuple comparison
        # below, we need to always ensure both our target and where we receive replies
        # from are compared with all zeroes removed so that we don't erroneously fail.
        # e.g. ('00::1', 53, 0, 0) != ('::1', 53, 0, 0)
        where_trunc = dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(where))
        destination = (where_trunc, port, 0, 0)
        if source is not None:
            source = (source, source_port, 0, 0)

    if sock:
        s = sock
    else:
        s = socket.socket(af, socket.SOCK_DGRAM)
    s.settimeout(timeout)
    try:
        expiration = compute_expiration(dns.query, timeout)
        if source is not None:
            s.bind(source)
        while True:
            try:
                s.sendto(wire, destination)
                break
            except socket.timeout:
                # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
                if expiration - time.time() <= 0.0:
                    raise dns.exception.Timeout
                eventlet.sleep(0.01)
                continue

        tried = False
        while True:
            # If we've tried to receive at least once, check to see if our
            # timer expired
            if tried and (expiration - time.time() <= 0.0):
                raise dns.exception.Timeout
            # Sleep if we are retrying the operation due to a bad source
            # address or a socket timeout.
            if tried:
                eventlet.sleep(0.01)
            tried = True

            try:
                (wire, from_address) = s.recvfrom(65535)
            except socket.timeout:
                # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
                continue
            if dns.inet.af_for_address(from_address[0]) == dns.inet.AF_INET6:
                # Purge all possible zeroes for ipv6 to match above logic
                addr = from_address[0]
                addr = dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(addr))
                from_address = (addr, from_address[1], from_address[2], from_address[3])
            if from_address != destination:
                if ignore_unexpected:
                    continue
                else:
                    raise dns.query.UnexpectedSource(
                        'got a response from %s instead of %s'
                        % (from_address, destination))
            try:
                if _handle_raise_on_truncation:
                    r = dns.message.from_wire(wire,
                                              keyring=q.keyring,
                                              request_mac=q.mac,
                                              one_rr_per_rrset=one_rr_per_rrset,
                                              ignore_trailing=ignore_trailing,
                                              raise_on_truncation=raise_on_truncation)
                else:
                    r = dns.message.from_wire(wire,
                                              keyring=q.keyring,
                                              request_mac=q.mac,
                                              one_rr_per_rrset=one_rr_per_rrset,
                                              ignore_trailing=ignore_trailing)
                if not q.is_response(r):
                    raise dns.query.BadResponse()
                break
            except dns.message.Truncated as e:
                if ignore_errors and not q.is_response(e.message()):
                    continue
                else:
                    raise
            except Exception:
                if ignore_errors:
                    continue
                else:
                    raise
    finally:
        s.close()

    return r


def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
        af=None, source=None, source_port=0,
        one_rr_per_rrset=False, ignore_trailing=False, sock=None):
    """coro friendly replacement for dns.query.tcp
    Return the response obtained after sending a query via TCP.

    @param q: the query
    @type q: dns.message.Message object
    @param where: where to send the message
    @type where: string containing an IPv4 or IPv6 address
    @param timeout: The number of seconds to wait before the query times out.
    If None, the default, wait forever.
    @type timeout: float
    @param port: The port to which to send the message.  The default is 53.
    @type port: int
    @param af: the address family to use.  The default is None, which
    causes the address family to be inferred from the form of where.
    If the inference attempt fails, AF_INET is used.
    @type af: int
    @rtype: dns.message.Message object
    @param source: source address.  The default is the IPv4 wildcard address.
    @type source: string
    @param source_port: The port from which to send the message.
    The default is 0.
    @type source_port: int
    @param one_rr_per_rrset: If True, put each RR into its own
    RRset.
    @type one_rr_per_rrset: bool
    @param ignore_trailing: If True, ignore trailing
    junk at end of the received message.
    @type ignore_trailing: bool
    @param sock: the socket to use for the
    query.  If None, the default, a socket is created.  Note that
    if a socket is provided, it must be a nonblocking connected stream socket,
    and the source and source_port are ignored.
    @type sock: socket.socket | None"""

    wire = q.to_wire()
    if af is None:
        try:
            af = dns.inet.af_for_address(where)
        except:
            af = dns.inet.AF_INET
    if af == dns.inet.AF_INET:
        destination = (where, port)
        if source is not None:
            source = (source, source_port)
    elif af == dns.inet.AF_INET6:
        destination = (where, port, 0, 0)
        if source is not None:
            source = (source, source_port, 0, 0)
    if sock:
        s = sock
    else:
        s = socket.socket(af, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        expiration = compute_expiration(dns.query, timeout)
        if source is not None:
            s.bind(source)
        while True:
            try:
                s.connect(destination)
                break
            except socket.timeout:
                # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
                if expiration - time.time() <= 0.0:
                    raise dns.exception.Timeout
                eventlet.sleep(0.01)
                continue

        l = len(wire)
        # copying the wire into tcpmsg is inefficient, but lets us
        # avoid writev() or doing a short write that would get pushed
        # onto the net
        tcpmsg = struct.pack("!H", l) + wire
        _net_write(s, tcpmsg, expiration)
        ldata = _net_read(s, 2, expiration)
        (l,) = struct.unpack("!H", ldata)
        wire = bytes(_net_read(s, l, expiration))
    finally:
        s.close()
    r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
                              one_rr_per_rrset=one_rr_per_rrset,
                              ignore_trailing=ignore_trailing)
    if not q.is_response(r):
        raise dns.query.BadResponse()
    return r


def reset():
    resolver.clear()


# Install our coro-friendly replacements for the tcp and udp query methods.
dns.query.tcp = tcp
dns.query.udp = udp
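
A sketch of how the replacements above are typically consumed (hostnames are placeholders)::

    from eventlet.support import greendns

    addr = greendns.gethostbyname('example.com')
    name, aliases, addrs = greendns.gethostbyname_ex('example.com')
    infos = greendns.getaddrinfo('example.com', 80)

After monkey patching (and unless greendns is disabled), the standard ``socket`` name-resolution calls are routed through these cooperative versions.
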
@ -0,0 +1,4 @@
import greenlet
getcurrent = greenlet.greenlet.getcurrent
GreenletExit = greenlet.greenlet.GreenletExit
greenlet = greenlet.greenlet
@ -0,0 +1,55 @@
"""A wait callback to allow psycopg2 cooperation with eventlet.

Use `make_psycopg_green()` to enable eventlet support in Psycopg.
"""

# Copyright (C) 2010 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import psycopg2
from psycopg2 import extensions

import eventlet.hubs


def make_psycopg_green():
    """Configure Psycopg to be used with eventlet in a non-blocking way."""
    if not hasattr(extensions, 'set_wait_callback'):
        raise ImportError(
            "support for coroutines not available in this Psycopg version (%s)"
            % psycopg2.__version__)

    extensions.set_wait_callback(eventlet_wait_callback)


def eventlet_wait_callback(conn, timeout=-1):
    """A wait callback useful to allow eventlet to work with Psycopg."""
    while 1:
        state = conn.poll()
        if state == extensions.POLL_OK:
            break
        elif state == extensions.POLL_READ:
            eventlet.hubs.trampoline(conn.fileno(), read=True)
        elif state == extensions.POLL_WRITE:
            eventlet.hubs.trampoline(conn.fileno(), write=True)
        else:
            raise psycopg2.OperationalError(
                "Bad result from poll: %r" % state)
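
A sketch of enabling the callback (connection parameters are placeholders)::

    import psycopg2
    from eventlet.support.psycopg2_patcher import make_psycopg_green

    make_psycopg_green()                      # installs eventlet_wait_callback globally
    conn = psycopg2.connect('dbname=test')    # now yields to the hub while waiting
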
12
venv/lib/python3.12/site-packages/eventlet/support/pylib.py
Normal file
@ -0,0 +1,12 @@
from py.magic import greenlet

import sys
import types


def emulate():
    module = types.ModuleType('greenlet')
    sys.modules['greenlet'] = module
    module.greenlet = greenlet
    module.getcurrent = greenlet.getcurrent
    module.GreenletExit = greenlet.GreenletExit
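
Both this module and the stackless variants below expose the same `emulate()` pattern: register a stand-in ``greenlet`` module in ``sys.modules`` before anything imports the real one. A sketch of activation (illustrative)::

    from eventlet.support import pylib

    pylib.emulate()      # must run before any `import greenlet`
    import greenlet      # now resolves to the emulated module
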
@ -0,0 +1,12 @@
from stackless import greenlet

import sys
import types


def emulate():
    module = types.ModuleType('greenlet')
    sys.modules['greenlet'] = module
    module.greenlet = greenlet
    module.getcurrent = greenlet.getcurrent
    module.GreenletExit = greenlet.GreenletExit
@ -0,0 +1,84 @@
"""
Support for using stackless python.  Broken and riddled with print statements
at the moment.  Please fix it!
"""

import sys
import types

import stackless

caller = None
coro_args = {}
tasklet_to_greenlet = {}


def getcurrent():
    return tasklet_to_greenlet[stackless.getcurrent()]


class FirstSwitch:
    def __init__(self, gr):
        self.gr = gr

    def __call__(self, *args, **kw):
        # print("first call", args, kw)
        gr = self.gr
        del gr.switch
        run, gr.run = gr.run, None
        t = stackless.tasklet(run)
        gr.t = t
        tasklet_to_greenlet[t] = gr
        t.setup(*args, **kw)
        t.run()


class greenlet:
    def __init__(self, run=None, parent=None):
        self.dead = False
        if parent is None:
            parent = getcurrent()

        self.parent = parent
        if run is not None:
            self.run = run

        self.switch = FirstSwitch(self)

    def switch(self, *args):
        # print("switch", args)
        global caller
        caller = stackless.getcurrent()
        coro_args[self] = args
        self.t.insert()
        stackless.schedule()
        if caller is not self.t:
            caller.remove()
        rval = coro_args[self]
        return rval

    def run(self):
        pass

    def __bool__(self):
        return self.run is None and not self.dead


class GreenletExit(Exception):
    pass


def emulate():
    module = types.ModuleType('greenlet')
    sys.modules['greenlet'] = module
    module.greenlet = greenlet
    module.getcurrent = getcurrent
    module.GreenletExit = GreenletExit

    caller = stackless.getcurrent()
    tasklet_to_greenlet[caller] = None
    main_coro = greenlet()
    tasklet_to_greenlet[caller] = main_coro
    main_coro.t = caller
    del main_coro.switch  # It's already running
    coro_args[main_coro] = None
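
For context, a hedged sketch of how these emulation modules are meant to be used: call `emulate()` once, before anything imports `greenlet`, so the shim registers itself in `sys.modules`. The exact module name under `eventlet.support` is an assumption here:

# Hypothetical sketch -- the shim module name is an assumption; on a
# Stackless interpreter it would be imported from eventlet.support.
import sys

from eventlet.support import stacklesss as shim  # assumed name

if 'greenlet' not in sys.modules:
    shim.emulate()      # installs a fake 'greenlet' module
import greenlet         # now resolves to the emulated module
current = greenlet.getcurrent()
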
184
venv/lib/python3.12/site-packages/eventlet/timeout.py
Normal file
184
venv/lib/python3.12/site-packages/eventlet/timeout.py
Normal file
@ -0,0 +1,184 @@
# Copyright (c) 2009-2010 Denis Bilenko, denis.bilenko at gmail com
# Copyright (c) 2010 Eventlet Contributors (see AUTHORS)
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import functools
import inspect

import eventlet
from eventlet.support import greenlets as greenlet
from eventlet.hubs import get_hub

__all__ = ['Timeout', 'with_timeout', 'wrap_is_timeout', 'is_timeout']

_MISSING = object()

# deriving from BaseException so that "except Exception as e" doesn't catch
# Timeout exceptions.


class Timeout(BaseException):
    """Raises *exception* in the current greenthread after *timeout* seconds.

    When *exception* is omitted or ``None``, the :class:`Timeout` instance
    itself is raised. If *seconds* is None, the timer is not scheduled, and is
    only useful if you're planning to raise it directly.

    Timeout objects are context managers, and so can be used in with statements.
    When used in a with statement, if *exception* is ``False``, the timeout is
    still raised, but the context manager suppresses it, so the code outside the
    with-block won't see it.
    """

    def __init__(self, seconds=None, exception=None):
        self.seconds = seconds
        self.exception = exception
        self.timer = None
        self.start()

    def start(self):
        """Schedule the timeout.  This is called on construction, so
        it should not be called explicitly, unless the timer has been
        canceled."""
        assert not self.pending, \
            '%r is already started; to restart it, cancel it first' % self
        if self.seconds is None:  # "fake" timeout (never expires)
            self.timer = None
        elif self.exception is None or isinstance(self.exception, bool):  # timeout that raises self
            self.timer = get_hub().schedule_call_global(
                self.seconds, greenlet.getcurrent().throw, self)
        else:  # regular timeout with user-provided exception
            self.timer = get_hub().schedule_call_global(
                self.seconds, greenlet.getcurrent().throw, self.exception)
        return self

    @property
    def pending(self):
        """True if the timeout is scheduled to be raised."""
        if self.timer is not None:
            return self.timer.pending
        else:
            return False

    def cancel(self):
        """If the timeout is pending, cancel it.  If not using
        Timeouts in ``with`` statements, always call cancel() in a
        ``finally`` after the block of code that is getting timed out.
        If not canceled, the timeout will be raised later on, in some
        unexpected section of the application."""
        if self.timer is not None:
            self.timer.cancel()
            self.timer = None

    def __repr__(self):
        classname = self.__class__.__name__
        if self.pending:
            pending = ' pending'
        else:
            pending = ''
        if self.exception is None:
            exception = ''
        else:
            exception = ' exception=%r' % self.exception
        return '<%s at %s seconds=%s%s%s>' % (
            classname, hex(id(self)), self.seconds, exception, pending)

    def __str__(self):
        """
        >>> raise Timeout  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Timeout
        """
        if self.seconds is None:
            return ''
        if self.seconds == 1:
            suffix = ''
        else:
            suffix = 's'
        if self.exception is None or self.exception is True:
            return '%s second%s' % (self.seconds, suffix)
        elif self.exception is False:
            return '%s second%s (silent)' % (self.seconds, suffix)
        else:
            return '%s second%s (%s)' % (self.seconds, suffix, self.exception)

    def __enter__(self):
        if self.timer is None:
            self.start()
        return self

    def __exit__(self, typ, value, tb):
        self.cancel()
        if value is self and self.exception is False:
            return True

    @property
    def is_timeout(self):
        return True


def with_timeout(seconds, function, *args, **kwds):
    """Wrap a call to some (yielding) function with a timeout; if the called
    function fails to return before the timeout, cancel it and return a flag
    value.
    """
    timeout_value = kwds.pop("timeout_value", _MISSING)
    timeout = Timeout(seconds)
    try:
        try:
            return function(*args, **kwds)
        except Timeout as ex:
            if ex is timeout and timeout_value is not _MISSING:
                return timeout_value
            raise
    finally:
        timeout.cancel()


def wrap_is_timeout(base):
    '''Adds an `.is_timeout=True` attribute to objects returned by `base()`.

    When `base` is a class, the attribute is added as a read-only property,
    and `base` itself is returned.  Otherwise, it returns a function that sets
    the attribute on the result of each `base()` call.

    Wrappers make a best effort to be transparent.
    '''
    if inspect.isclass(base):
        base.is_timeout = property(lambda _: True)
        return base

    @functools.wraps(base)
    def fun(*args, **kwargs):
        ex = base(*args, **kwargs)
        ex.is_timeout = True
        return ex
    return fun


if isinstance(__builtins__, dict):  # seen when running tests on py310, but HOW??
    _timeout_err = __builtins__.get('TimeoutError', Timeout)
else:
    _timeout_err = getattr(__builtins__, 'TimeoutError', Timeout)


def is_timeout(obj):
    return bool(getattr(obj, 'is_timeout', False)) or isinstance(obj, _timeout_err)
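
A short usage sketch of the API above, since the three spellings are easy to confuse: `Timeout` as a context manager, `exception=False` for silent expiry, and `with_timeout` with a flag value (all names come from this module):

import eventlet
from eventlet.timeout import Timeout, with_timeout

# 1) Raise the Timeout instance itself after 0.1s:
try:
    with Timeout(0.1):
        eventlet.sleep(1)
except Timeout:
    print("timed out")

# 2) exception=False: the with-block suppresses the timeout silently.
with Timeout(0.1, False):
    eventlet.sleep(1)
print("still running")

# 3) with_timeout cancels the timer for us and returns a flag value.
result = with_timeout(0.1, eventlet.sleep, 1, timeout_value="too slow")
assert result == "too slow"
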
336
venv/lib/python3.12/site-packages/eventlet/tpool.py
Normal file
336
venv/lib/python3.12/site-packages/eventlet/tpool.py
Normal file
@ -0,0 +1,336 @@
# Copyright (c) 2007-2009, Linden Research, Inc.
# Copyright (c) 2007, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
try:
    import _imp as imp
except ImportError:
    import imp
import os
import sys
import traceback

import eventlet
from eventlet import event, greenio, greenthread, patcher, timeout

__all__ = ['execute', 'Proxy', 'killall', 'set_num_threads']


EXC_CLASSES = (Exception, timeout.Timeout)
SYS_EXCS = (GeneratorExit, KeyboardInterrupt, SystemExit)

QUIET = True

socket = patcher.original('socket')
threading = patcher.original('threading')
Queue_module = patcher.original('queue')

Empty = Queue_module.Empty
Queue = Queue_module.Queue

_bytetosend = b' '
_coro = None
_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20))
_reqq = _rspq = None
_rsock = _wsock = None
_setup_already = False
_threads = []


def tpool_trampoline():
    global _rspq
    while True:
        try:
            _c = _rsock.recv(1)
            assert _c
            # FIXME: this is probably redundant since using sockets instead of pipe now
        except ValueError:
            break  # will be raised when pipe is closed
        while not _rspq.empty():
            try:
                (e, rv) = _rspq.get(block=False)
                e.send(rv)
                e = rv = None
            except Empty:
                pass


def tworker():
    global _rspq
    while True:
        try:
            msg = _reqq.get()
        except AttributeError:
            return  # can't get anything off of a dud queue
        if msg is None:
            return
        (e, meth, args, kwargs) = msg
        rv = None
        try:
            rv = meth(*args, **kwargs)
        except SYS_EXCS:
            raise
        except EXC_CLASSES:
            rv = sys.exc_info()
            traceback.clear_frames(rv[1].__traceback__)
        # test_leakage_from_tracebacks verifies that the use of
        # exc_info does not lead to memory leaks
        _rspq.put((e, rv))
        msg = meth = args = kwargs = e = rv = None
        _wsock.sendall(_bytetosend)


def execute(meth, *args, **kwargs):
    """
    Execute *meth* in a Python thread, blocking the current coroutine/
    greenthread until the method completes.

    The primary use case for this is to wrap an object or module that is not
    amenable to monkeypatching or any of the other tricks that Eventlet uses
    to achieve cooperative yielding.  With tpool, you can force such objects to
    cooperate with green threads by sticking them in native threads, at the cost
    of some overhead.
    """
    setup()
    # if already in tpool, don't recurse into the tpool
    # also, call functions directly if we're inside an import lock, because
    # if meth does any importing (sadly common), it will hang
    my_thread = threading.current_thread()
    if my_thread in _threads or imp.lock_held() or _nthreads == 0:
        return meth(*args, **kwargs)

    e = event.Event()
    _reqq.put((e, meth, args, kwargs))

    rv = e.wait()
    if isinstance(rv, tuple) \
            and len(rv) == 3 \
            and isinstance(rv[1], EXC_CLASSES):
        (c, e, tb) = rv
        if not QUIET:
            traceback.print_exception(c, e, tb)
            traceback.print_stack()
        raise e.with_traceback(tb)
    return rv


def proxy_call(autowrap, f, *args, **kwargs):
    """
    Call a function *f* and return the value.  If the type of the return value
    is in the *autowrap* collection, then it is wrapped in a :class:`Proxy`
    object before return.

    Normally *f* will be called in the threadpool with :func:`execute`; if the
    keyword argument "nonblocking" is set to ``True``, it will simply be
    executed directly.  This is useful if you have an object which has methods
    that don't need to be called in a separate thread, but which return objects
    that should be Proxy wrapped.
    """
    if kwargs.pop('nonblocking', False):
        rv = f(*args, **kwargs)
    else:
        rv = execute(f, *args, **kwargs)
    if isinstance(rv, autowrap):
        return Proxy(rv, autowrap)
    else:
        return rv


class Proxy:
    """
    a simple proxy-wrapper of any object that comes with a
    methods-only interface, in order to forward every method
    invocation onto a thread in the native-thread pool.  A key
    restriction is that the object's methods should not switch
    greenlets or use Eventlet primitives, since they are in a
    different thread from the main hub, and therefore might behave
    unexpectedly.  This is for running native-threaded code
    only.

    It's common to want to have some of the attributes or return
    values also wrapped in Proxy objects (for example, database
    connection objects produce cursor objects which also should be
    wrapped in Proxy objects to remain nonblocking).  *autowrap*, if
    supplied, is a collection of types; if an attribute or return
    value matches one of those types (via isinstance), it will be
    wrapped in a Proxy.  *autowrap_names* is a collection
    of strings, which represent the names of attributes that should be
    wrapped in Proxy objects when accessed.
    """

    def __init__(self, obj, autowrap=(), autowrap_names=()):
        self._obj = obj
        self._autowrap = autowrap
        self._autowrap_names = autowrap_names

    def __getattr__(self, attr_name):
        f = getattr(self._obj, attr_name)
        if not hasattr(f, '__call__'):
            if isinstance(f, self._autowrap) or attr_name in self._autowrap_names:
                return Proxy(f, self._autowrap)
            return f

        def doit(*args, **kwargs):
            result = proxy_call(self._autowrap, f, *args, **kwargs)
            if attr_name in self._autowrap_names and not isinstance(result, Proxy):
                return Proxy(result)
            return result
        return doit

    # the following are a bunch of methods that the Python interpreter
    # doesn't use getattr to retrieve and therefore have to be defined
    # explicitly
    def __getitem__(self, key):
        return proxy_call(self._autowrap, self._obj.__getitem__, key)

    def __setitem__(self, key, value):
        return proxy_call(self._autowrap, self._obj.__setitem__, key, value)

    def __deepcopy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__deepcopy__, memo)

    def __copy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__copy__, memo)

    def __call__(self, *a, **kw):
        if '__call__' in self._autowrap_names:
            return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw))
        else:
            return proxy_call(self._autowrap, self._obj, *a, **kw)

    def __enter__(self):
        return proxy_call(self._autowrap, self._obj.__enter__)

    def __exit__(self, *exc):
        return proxy_call(self._autowrap, self._obj.__exit__, *exc)

    # these don't go through a proxy call, because they're likely to
    # be called often, and are unlikely to be implemented on the
    # wrapped object in such a way that they would block
    def __eq__(self, rhs):
        return self._obj == rhs

    def __hash__(self):
        return self._obj.__hash__()

    def __repr__(self):
        return self._obj.__repr__()

    def __str__(self):
        return self._obj.__str__()

    def __len__(self):
        return len(self._obj)

    def __nonzero__(self):
        return bool(self._obj)
    # Python3
    __bool__ = __nonzero__

    def __iter__(self):
        it = iter(self._obj)
        if it == self._obj:
            return self
        else:
            return Proxy(it)

    def next(self):
        return proxy_call(self._autowrap, next, self._obj)
    # Python3
    __next__ = next


def setup():
    global _rsock, _wsock, _coro, _setup_already, _rspq, _reqq
    if _setup_already:
        return
    else:
        _setup_already = True

    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn("Zero threads in tpool.  All tpool.execute calls will"
                      " execute in main thread.  Check the value of the"
                      " environment variable EVENTLET_THREADPOOL_SIZE.",
                      RuntimeWarning)
    _reqq = Queue(maxsize=-1)
    _rspq = Queue(maxsize=-1)

    # connected socket pair
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    sock.listen(1)
    csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    csock.connect(sock.getsockname())
    csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    _wsock, _addr = sock.accept()
    _wsock.settimeout(None)
    _wsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    sock.close()
    _rsock = greenio.GreenSocket(csock)
    _rsock.settimeout(None)

    for i in range(_nthreads):
        t = threading.Thread(target=tworker,
                             name="tpool_thread_%s" % i)
        t.daemon = True
        t.start()
        _threads.append(t)

    _coro = greenthread.spawn_n(tpool_trampoline)
    # This yield fixes subtle error with GreenSocket.__del__
    eventlet.sleep(0)


# Avoid ResourceWarning unclosed socket on Python3.2+
@atexit.register
def killall():
    global _setup_already, _rspq, _rsock, _wsock
    if not _setup_already:
        return

    # This yield fixes freeze in some scenarios
    eventlet.sleep(0)

    for thr in _threads:
        _reqq.put(None)
    for thr in _threads:
        thr.join()
    del _threads[:]

    # return any remaining results
    while (_rspq is not None) and not _rspq.empty():
        try:
            (e, rv) = _rspq.get(block=False)
            e.send(rv)
            e = rv = None
        except Empty:
            pass

    if _coro is not None:
        greenthread.kill(_coro)
    if _rsock is not None:
        _rsock.close()
        _rsock = None
    if _wsock is not None:
        _wsock.close()
        _wsock = None
    _rspq = None
    _setup_already = False


def set_num_threads(nthreads):
    global _nthreads
    _nthreads = nthreads
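
A brief sketch of the two entry points above: `execute` for one-off blocking calls, and `Proxy` for whole objects. The use of `sqlite3` here is just an illustrative stand-in for any blocking module that can't be monkeypatched:

import time
import sqlite3

from eventlet import tpool

# One-off blocking call: runs in a native thread; the hub keeps running.
tpool.execute(time.sleep, 0.1)

# Wrap a blocking object so every method call goes through the pool.
# check_same_thread=False lets pool threads use the connection.
conn = tpool.Proxy(sqlite3.connect(':memory:', check_same_thread=False),
                   autowrap_names=('cursor',))  # cursors get wrapped too
cur = conn.cursor()
cur.execute('SELECT 1')
print(cur.fetchall())
tpool.killall()
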
868
venv/lib/python3.12/site-packages/eventlet/websocket.py
Normal file
868
venv/lib/python3.12/site-packages/eventlet/websocket.py
Normal file
@ -0,0 +1,868 @@
import base64
import codecs
import collections
import errno
from random import Random
from socket import error as SocketError
import string
import struct
import sys
import time

import zlib

try:
    from hashlib import md5, sha1
except ImportError:  # pragma NO COVER
    from md5 import md5
    from sha import sha as sha1

from eventlet import semaphore
from eventlet import wsgi
from eventlet.green import socket
from eventlet.support import get_errno

# Python 2's utf8 decoding is more lenient than we'd like
# In order to pass autobahn's testsuite we need stricter validation
# if available...
for _mod in ('wsaccel.utf8validator', 'autobahn.utf8validator'):
    # autobahn has its own python-based validator. in newest versions
    # this prefers to use wsaccel, a cython based implementation, if available.
    # wsaccel may also be installed w/out autobahn, or with an earlier version.
    try:
        utf8validator = __import__(_mod, {}, {}, [''])
    except ImportError:
        utf8validator = None
    else:
        break

ACCEPTABLE_CLIENT_ERRORS = {errno.ECONNRESET, errno.EPIPE, errno.ESHUTDOWN}
DEFAULT_MAX_FRAME_LENGTH = 8 << 20

__all__ = ["WebSocketWSGI", "WebSocket"]
PROTOCOL_GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
VALID_CLOSE_STATUS = set(
    list(range(1000, 1004)) +
    list(range(1007, 1012)) +
    # 3000-3999: reserved for use by libraries, frameworks,
    # and applications
    list(range(3000, 4000)) +
    # 4000-4999: reserved for private use and thus can't
    # be registered
    list(range(4000, 5000))
)


class BadRequest(Exception):
    def __init__(self, status='400 Bad Request', body=None, headers=None):
        super(Exception, self).__init__()
        self.status = status
        self.body = body
        self.headers = headers


class WebSocketWSGI:
    """Wraps a websocket handler function in a WSGI application.

    Use it like this::

      @websocket.WebSocketWSGI
      def my_handler(ws):
          from_browser = ws.wait()
          ws.send("from server")

    The single argument to the function will be an instance of
    :class:`WebSocket`.  To close the socket, simply return from the
    function.  Note that the server will log the websocket request at
    the time of closure.

    An optional argument max_frame_length can be given, which will set the
    maximum incoming *uncompressed* payload length of a frame.  By default, this
    is set to 8MiB.  Note that excessive values here might create a DOS attack
    vector.
    """

    def __init__(self, handler, max_frame_length=DEFAULT_MAX_FRAME_LENGTH):
        self.handler = handler
        self.protocol_version = None
        self.support_legacy_versions = True
        self.supported_protocols = []
        self.origin_checker = None
        self.max_frame_length = max_frame_length

    @classmethod
    def configured(cls,
                   handler=None,
                   supported_protocols=None,
                   origin_checker=None,
                   support_legacy_versions=False):
        def decorator(handler):
            inst = cls(handler)
            inst.support_legacy_versions = support_legacy_versions
            inst.origin_checker = origin_checker
            if supported_protocols:
                inst.supported_protocols = supported_protocols
            return inst
        if handler is None:
            return decorator
        return decorator(handler)

    def __call__(self, environ, start_response):
        http_connection_parts = [
            part.strip()
            for part in environ.get('HTTP_CONNECTION', '').lower().split(',')]
        if not ('upgrade' in http_connection_parts and
                environ.get('HTTP_UPGRADE', '').lower() == 'websocket'):
            # need to check a few more things here for true compliance
            start_response('400 Bad Request', [('Connection', 'close')])
            return []

        try:
            if 'HTTP_SEC_WEBSOCKET_VERSION' in environ:
                ws = self._handle_hybi_request(environ)
            elif self.support_legacy_versions:
                ws = self._handle_legacy_request(environ)
            else:
                raise BadRequest()
        except BadRequest as e:
            status = e.status
            body = e.body or b''
            headers = e.headers or []
            start_response(status,
                           [('Connection', 'close'), ] + headers)
            return [body]

        # We're ready to switch protocols; if running under Eventlet
        # (this is not always the case) then flag the connection as
        # idle to play well with a graceful stop
        if 'eventlet.set_idle' in environ:
            environ['eventlet.set_idle']()
        try:
            self.handler(ws)
        except OSError as e:
            if get_errno(e) not in ACCEPTABLE_CLIENT_ERRORS:
                raise
        # Make sure we send the closing frame
        ws._send_closing_frame(True)
        # use this undocumented feature of eventlet.wsgi to ensure that it
        # doesn't barf on the fact that we didn't call start_response
        wsgi.WSGI_LOCAL.already_handled = True
        return []

    def _handle_legacy_request(self, environ):
        if 'eventlet.input' in environ:
            sock = environ['eventlet.input'].get_socket()
        elif 'gunicorn.socket' in environ:
            sock = environ['gunicorn.socket']
        else:
            raise Exception('No eventlet.input or gunicorn.socket present in environ.')

        if 'HTTP_SEC_WEBSOCKET_KEY1' in environ:
            self.protocol_version = 76
            if 'HTTP_SEC_WEBSOCKET_KEY2' not in environ:
                raise BadRequest()
        else:
            self.protocol_version = 75

        if self.protocol_version == 76:
            key1 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY1'])
            key2 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY2'])
            # There's no content-length header in the request, but it has 8
            # bytes of data.
            environ['wsgi.input'].content_length = 8
            key3 = environ['wsgi.input'].read(8)
            key = struct.pack(">II", key1, key2) + key3
            response = md5(key).digest()

        # Start building the response
        scheme = 'ws'
        if environ.get('wsgi.url_scheme') == 'https':
            scheme = 'wss'
        location = '%s://%s%s%s' % (
            scheme,
            environ.get('HTTP_HOST'),
            environ.get('SCRIPT_NAME'),
            environ.get('PATH_INFO')
        )
        qs = environ.get('QUERY_STRING')
        if qs is not None:
            location += '?' + qs
        if self.protocol_version == 75:
            handshake_reply = (
                b"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
                b"Upgrade: WebSocket\r\n"
                b"Connection: Upgrade\r\n"
                b"WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n"
                b"WebSocket-Location: " + location.encode() + b"\r\n\r\n"
            )
        elif self.protocol_version == 76:
            handshake_reply = (
                b"HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
                b"Upgrade: WebSocket\r\n"
                b"Connection: Upgrade\r\n"
                b"Sec-WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n"
                b"Sec-WebSocket-Protocol: " +
                environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default').encode() + b"\r\n"
                b"Sec-WebSocket-Location: " + location.encode() + b"\r\n"
                b"\r\n" + response
            )
        else:  # pragma NO COVER
            raise ValueError("Unknown WebSocket protocol version.")
        sock.sendall(handshake_reply)
        return WebSocket(sock, environ, self.protocol_version)

    def _parse_extension_header(self, header):
        if header is None:
            return None
        res = {}
        for ext in header.split(","):
            parts = ext.split(";")
            config = {}
            for part in parts[1:]:
                key_val = part.split("=")
                if len(key_val) == 1:
                    config[key_val[0].strip().lower()] = True
                else:
                    config[key_val[0].strip().lower()] = key_val[1].strip().strip('"').lower()
            res.setdefault(parts[0].strip().lower(), []).append(config)
        return res

    def _negotiate_permessage_deflate(self, extensions):
        if not extensions:
            return None
        deflate = extensions.get("permessage-deflate")
        if deflate is None:
            return None
        for config in deflate:
            # We'll evaluate each config in the client's preferred order and pick
            # the first that we can support.
            want_config = {
                # These are bool options, we can support both
                "server_no_context_takeover": config.get("server_no_context_takeover", False),
                "client_no_context_takeover": config.get("client_no_context_takeover", False)
            }
            # These are either bool OR int options. True means the client can accept a value
            # for the option, a number means the client wants that specific value.
            max_wbits = min(zlib.MAX_WBITS, 15)
            mwb = config.get("server_max_window_bits")
            if mwb is not None:
                if mwb is True:
                    want_config["server_max_window_bits"] = max_wbits
                else:
                    want_config["server_max_window_bits"] = \
                        int(config.get("server_max_window_bits", max_wbits))
                if not (8 <= want_config["server_max_window_bits"] <= 15):
                    continue
            mwb = config.get("client_max_window_bits")
            if mwb is not None:
                if mwb is True:
                    want_config["client_max_window_bits"] = max_wbits
                else:
                    want_config["client_max_window_bits"] = \
                        int(config.get("client_max_window_bits", max_wbits))
                if not (8 <= want_config["client_max_window_bits"] <= 15):
                    continue
            return want_config
        return None

    def _format_extension_header(self, parsed_extensions):
        if not parsed_extensions:
            return None
        parts = []
        for name, config in parsed_extensions.items():
            ext_parts = [name.encode()]
            for key, value in config.items():
                if value is False:
                    pass
                elif value is True:
                    ext_parts.append(key.encode())
                else:
                    ext_parts.append(("%s=%s" % (key, str(value))).encode())
            parts.append(b"; ".join(ext_parts))
        return b", ".join(parts)

    def _handle_hybi_request(self, environ):
        if 'eventlet.input' in environ:
            sock = environ['eventlet.input'].get_socket()
        elif 'gunicorn.socket' in environ:
            sock = environ['gunicorn.socket']
        else:
            raise Exception('No eventlet.input or gunicorn.socket present in environ.')

        hybi_version = environ['HTTP_SEC_WEBSOCKET_VERSION']
        if hybi_version not in ('8', '13', ):
            raise BadRequest(status='426 Upgrade Required',
                             headers=[('Sec-WebSocket-Version', '8, 13')])
        self.protocol_version = int(hybi_version)
        if 'HTTP_SEC_WEBSOCKET_KEY' not in environ:
            # That's bad.
            raise BadRequest()
        origin = environ.get(
            'HTTP_ORIGIN',
            (environ.get('HTTP_SEC_WEBSOCKET_ORIGIN', '')
             if self.protocol_version <= 8 else ''))
        if self.origin_checker is not None:
            if not self.origin_checker(environ.get('HTTP_HOST'), origin):
                raise BadRequest(status='403 Forbidden')
        protocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', None)
        negotiated_protocol = None
        if protocols:
            for p in (i.strip() for i in protocols.split(',')):
                if p in self.supported_protocols:
                    negotiated_protocol = p
                    break

        key = environ['HTTP_SEC_WEBSOCKET_KEY']
        response = base64.b64encode(sha1(key.encode() + PROTOCOL_GUID).digest())
        handshake_reply = [b"HTTP/1.1 101 Switching Protocols",
                           b"Upgrade: websocket",
                           b"Connection: Upgrade",
                           b"Sec-WebSocket-Accept: " + response]
        if negotiated_protocol:
            handshake_reply.append(b"Sec-WebSocket-Protocol: " + negotiated_protocol.encode())

        parsed_extensions = {}
        extensions = self._parse_extension_header(environ.get("HTTP_SEC_WEBSOCKET_EXTENSIONS"))

        deflate = self._negotiate_permessage_deflate(extensions)
        if deflate is not None:
            parsed_extensions["permessage-deflate"] = deflate

        formatted_ext = self._format_extension_header(parsed_extensions)
        if formatted_ext is not None:
            handshake_reply.append(b"Sec-WebSocket-Extensions: " + formatted_ext)

        sock.sendall(b'\r\n'.join(handshake_reply) + b'\r\n\r\n')
        return RFC6455WebSocket(sock, environ, self.protocol_version,
                                protocol=negotiated_protocol,
                                extensions=parsed_extensions,
                                max_frame_length=self.max_frame_length)

    def _extract_number(self, value):
        """
        Utility function which, given a string like 'g98sd 5[]221@1', will
        return 9852211. Used to parse the Sec-WebSocket-Key headers.
        """
        out = ""
        spaces = 0
        for char in value:
            if char in string.digits:
                out += char
            elif char == " ":
                spaces += 1
        return int(out) // spaces


class WebSocket:
    """A websocket object that handles the details of
    serialization/deserialization to the socket.

    The primary way to interact with a :class:`WebSocket` object is to
    call :meth:`send` and :meth:`wait` in order to pass messages back
    and forth with the browser.  Also available are the following
    properties:

    path
        The path value of the request.  This is the same as the WSGI PATH_INFO variable,
        but more convenient.
    protocol
        The value of the Websocket-Protocol header.
    origin
        The value of the 'Origin' header.
    environ
        The full WSGI environment for this request.

    """

    def __init__(self, sock, environ, version=76):
        """
        :param socket: The eventlet socket
        :type socket: :class:`eventlet.greenio.GreenSocket`
        :param environ: The wsgi environment
        :param version: The WebSocket spec version to follow (default is 76)
        """
        self.log = environ.get('wsgi.errors', sys.stderr)
        self.log_context = 'server={shost}/{spath} client={caddr}:{cport}'.format(
            shost=environ.get('HTTP_HOST'),
            spath=environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', ''),
            caddr=environ.get('REMOTE_ADDR'), cport=environ.get('REMOTE_PORT'),
        )
        self.socket = sock
        self.origin = environ.get('HTTP_ORIGIN')
        self.protocol = environ.get('HTTP_WEBSOCKET_PROTOCOL')
        self.path = environ.get('PATH_INFO')
        self.environ = environ
        self.version = version
        self.websocket_closed = False
        self._buf = b""
        self._msgs = collections.deque()
        self._sendlock = semaphore.Semaphore()

    def _pack_message(self, message):
        """Pack the message inside ``00`` and ``FF``

        As per the dataframing section (5.3) for the websocket spec
        """
        if isinstance(message, str):
            message = message.encode('utf-8')
        elif not isinstance(message, bytes):
            message = str(message).encode()
        packed = b"\x00" + message + b"\xFF"
        return packed

    def _parse_messages(self):
        """ Parses for messages in the buffer *buf*.  It is assumed that
        the buffer contains the start character for a message, but that it
        may contain only part of the rest of the message.

        Returns an array of messages, and the buffer remainder that
        didn't contain any full messages."""
        msgs = []
        end_idx = 0
        buf = self._buf
        while buf:
            frame_type = buf[0]
            if frame_type == 0:
                # Normal message.
                end_idx = buf.find(b"\xFF")
                if end_idx == -1:  # pragma NO COVER
                    break
                msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))
                buf = buf[end_idx + 1:]
            elif frame_type == 255:
                # Closing handshake.
                assert buf[1] == 0, "Unexpected closing handshake: %r" % buf
                self.websocket_closed = True
                break
            else:
                raise ValueError("Don't understand how to parse this type of message: %r" % buf)
        self._buf = buf
        return msgs

    def send(self, message):
        """Send a message to the browser.

        *message* should be convertible to a string; unicode objects should be
        encodable as utf-8.  Raises socket.error with errno of 32
        (broken pipe) if the socket has already been closed by the client."""
        packed = self._pack_message(message)
        # if two greenthreads are trying to send at the same time
        # on the same socket, sendlock prevents interleaving and corruption
        self._sendlock.acquire()
        try:
            self.socket.sendall(packed)
        finally:
            self._sendlock.release()

    def wait(self):
        """Waits for and deserializes messages.

        Returns a single message; the oldest not yet processed.  If the client
        has already closed the connection, returns None.  This is different
        from normal socket behavior because the empty string is a valid
        websocket message."""
        while not self._msgs:
            # Websocket might be closed already.
            if self.websocket_closed:
                return None
            # no parsed messages, must mean buf needs more data
            delta = self.socket.recv(8096)
            if delta == b'':
                return None
            self._buf += delta
            msgs = self._parse_messages()
            self._msgs.extend(msgs)
        return self._msgs.popleft()

    def _send_closing_frame(self, ignore_send_errors=False):
        """Sends the closing frame to the client, if required."""
        if self.version == 76 and not self.websocket_closed:
            try:
                self.socket.sendall(b"\xff\x00")
            except OSError:
                # Sometimes, like when the remote side cuts off the connection,
                # we don't care about this.
                if not ignore_send_errors:  # pragma NO COVER
                    raise
            self.websocket_closed = True

    def close(self):
        """Forcibly close the websocket; generally it is preferable to
        return from the handler method."""
        try:
            self._send_closing_frame(True)
            self.socket.shutdown(True)
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e))
        finally:
            self.socket.close()


class ConnectionClosedError(Exception):
    pass


class FailedConnectionError(Exception):
    def __init__(self, status, message):
        super().__init__(status, message)
        self.message = message
        self.status = status


class ProtocolError(ValueError):
    pass


class RFC6455WebSocket(WebSocket):
    def __init__(self, sock, environ, version=13, protocol=None, client=False, extensions=None,
                 max_frame_length=DEFAULT_MAX_FRAME_LENGTH):
        super().__init__(sock, environ, version)
        self.iterator = self._iter_frames()
        self.client = client
        self.protocol = protocol
        self.extensions = extensions or {}

        self._deflate_enc = None
        self._deflate_dec = None
        self.max_frame_length = max_frame_length
        self._remote_close_data = None

    class UTF8Decoder:
        def __init__(self):
            if utf8validator:
                self.validator = utf8validator.Utf8Validator()
            else:
                self.validator = None
            decoderclass = codecs.getincrementaldecoder('utf8')
            self.decoder = decoderclass()

        def reset(self):
            if self.validator:
                self.validator.reset()
            self.decoder.reset()

        def decode(self, data, final=False):
            if self.validator:
                valid, eocp, c_i, t_i = self.validator.validate(data)
                if not valid:
                    raise ValueError('Data is not valid unicode')
            return self.decoder.decode(data, final)

    def _get_permessage_deflate_enc(self):
        options = self.extensions.get("permessage-deflate")
        if options is None:
            return None

        def _make():
            return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED,
                                    -options.get("client_max_window_bits" if self.client
                                                 else "server_max_window_bits",
                                                 zlib.MAX_WBITS))

        if options.get("client_no_context_takeover" if self.client
                       else "server_no_context_takeover"):
            # This option means we have to make a new one every time
            return _make()
        else:
            if self._deflate_enc is None:
                self._deflate_enc = _make()
            return self._deflate_enc

    def _get_permessage_deflate_dec(self, rsv1):
        options = self.extensions.get("permessage-deflate")
        if options is None or not rsv1:
            return None

        def _make():
            return zlib.decompressobj(-options.get("server_max_window_bits" if self.client
                                                   else "client_max_window_bits",
                                                   zlib.MAX_WBITS))

        if options.get("server_no_context_takeover" if self.client
                       else "client_no_context_takeover"):
            # This option means we have to make a new one every time
            return _make()
        else:
            if self._deflate_dec is None:
                self._deflate_dec = _make()
            return self._deflate_dec

    def _get_bytes(self, numbytes):
        data = b''
        while len(data) < numbytes:
            d = self.socket.recv(numbytes - len(data))
            if not d:
                raise ConnectionClosedError()
            data = data + d
        return data

    class Message:
        def __init__(self, opcode, max_frame_length, decoder=None, decompressor=None):
            self.decoder = decoder
            self.data = []
            self.finished = False
            self.opcode = opcode
            self.decompressor = decompressor
            self.max_frame_length = max_frame_length

        def push(self, data, final=False):
            self.finished = final
            self.data.append(data)

        def getvalue(self):
            data = b"".join(self.data)
            if not self.opcode & 8 and self.decompressor:
                data = self.decompressor.decompress(data + b"\x00\x00\xff\xff", self.max_frame_length)
                if self.decompressor.unconsumed_tail:
                    raise FailedConnectionError(
                        1009,
                        "Incoming compressed frame exceeds length limit of {} bytes.".format(self.max_frame_length))

            if self.decoder:
                data = self.decoder.decode(data, self.finished)
            return data

    @staticmethod
    def _apply_mask(data, mask, length=None, offset=0):
        if length is None:
            length = len(data)
        cnt = range(length)
        return b''.join(bytes((data[i] ^ mask[(offset + i) % 4],)) for i in cnt)

    def _handle_control_frame(self, opcode, data):
        if opcode == 8:  # connection close
            self._remote_close_data = data
            if not data:
                status = 1000
            elif len(data) > 1:
                status = struct.unpack_from('!H', data)[0]
                if not status or status not in VALID_CLOSE_STATUS:
                    raise FailedConnectionError(
                        1002,
                        "Unexpected close status code.")
                try:
                    data = self.UTF8Decoder().decode(data[2:], True)
                except (UnicodeDecodeError, ValueError):
                    raise FailedConnectionError(
                        1002,
                        "Close message data should be valid UTF-8.")
            else:
                status = 1002
            self.close(close_data=(status, ''))
            raise ConnectionClosedError()
        elif opcode == 9:  # ping
            self.send(data, control_code=0xA)
        elif opcode == 0xA:  # pong
            pass
        else:
            raise FailedConnectionError(
                1002, "Unknown control frame received.")

    def _iter_frames(self):
        fragmented_message = None
        try:
            while True:
                message = self._recv_frame(message=fragmented_message)
                if message.opcode & 8:
                    self._handle_control_frame(
                        message.opcode, message.getvalue())
                    continue
                if fragmented_message and message is not fragmented_message:
                    raise RuntimeError('Unexpected message change.')
                fragmented_message = message
                if message.finished:
                    data = fragmented_message.getvalue()
                    fragmented_message = None
                    yield data
        except FailedConnectionError:
            exc_typ, exc_val, exc_tb = sys.exc_info()
            self.close(close_data=(exc_val.status, exc_val.message))
        except ConnectionClosedError:
            return
        except Exception:
            self.close(close_data=(1011, 'Internal Server Error'))
            raise

    def _recv_frame(self, message=None):
        recv = self._get_bytes

        # Unpacking the frame described in Section 5.2 of RFC6455
        # (https://tools.ietf.org/html/rfc6455#section-5.2)
        header = recv(2)
        a, b = struct.unpack('!BB', header)
        finished = a >> 7 == 1
        rsv123 = a >> 4 & 7
        rsv1 = rsv123 & 4
        if rsv123:
            if rsv1 and "permessage-deflate" not in self.extensions:
                # must be zero - unless it's compressed then rsv1 is true
                raise FailedConnectionError(
                    1002,
                    "RSV1, RSV2, RSV3: MUST be 0 unless an extension is"
                    " negotiated that defines meanings for non-zero values.")
        opcode = a & 15
        if opcode not in (0, 1, 2, 8, 9, 0xA):
            raise FailedConnectionError(1002, "Unknown opcode received.")
        masked = b & 128 == 128
        if not masked and not self.client:
            raise FailedConnectionError(1002, "A client MUST mask all frames"
                                              " that it sends to the server")
        length = b & 127
        if opcode & 8:
            if not finished:
                raise FailedConnectionError(1002, "Control frames must not"
                                                  " be fragmented.")
            if length > 125:
                raise FailedConnectionError(
                    1002,
                    "All control frames MUST have a payload length of 125"
                    " bytes or less")
        elif opcode and message:
            raise FailedConnectionError(
                1002,
                "Received a non-continuation opcode within"
                " fragmented message.")
        elif not opcode and not message:
            raise FailedConnectionError(
                1002,
                "Received continuation opcode with no previous"
                " fragments received.")
        if length == 126:
            length = struct.unpack('!H', recv(2))[0]
        elif length == 127:
            length = struct.unpack('!Q', recv(8))[0]

        if length > self.max_frame_length:
            raise FailedConnectionError(1009, "Incoming frame of {} bytes is above length limit of {} bytes.".format(
                length, self.max_frame_length))
        if masked:
            mask = struct.unpack('!BBBB', recv(4))
        received = 0
        if not message or opcode & 8:
            decoder = self.UTF8Decoder() if opcode == 1 else None
            decompressor = self._get_permessage_deflate_dec(rsv1)
            message = self.Message(opcode, self.max_frame_length, decoder=decoder, decompressor=decompressor)
        if not length:
            message.push(b'', final=finished)
        else:
            while received < length:
                d = self.socket.recv(length - received)
                if not d:
                    raise ConnectionClosedError()
                dlen = len(d)
                if masked:
                    d = self._apply_mask(d, mask, length=dlen, offset=received)
                received = received + dlen
                try:
                    message.push(d, final=finished)
                except (UnicodeDecodeError, ValueError):
                    raise FailedConnectionError(
                        1007, "Text data must be valid utf-8")
        return message

    def _pack_message(self, message, masked=False,
                      continuation=False, final=True, control_code=None):
        is_text = False
        if isinstance(message, str):
            message = message.encode('utf-8')
            is_text = True

        compress_bit = 0
        compressor = self._get_permessage_deflate_enc()
        # Control frames are identified by opcodes where the most significant
        # bit of the opcode is 1.  Currently defined opcodes for control frames
        # include 0x8 (Close), 0x9 (Ping), and 0xA (Pong).  Opcodes 0xB-0xF are
        # reserved for further control frames yet to be defined.
        # https://datatracker.ietf.org/doc/html/rfc6455#section-5.5
        is_control_frame = (control_code or 0) & 8
        # An endpoint MUST NOT set the "Per-Message Compressed" bit of control
        # frames and non-first fragments of a data message.  An endpoint
        # receiving such a frame MUST _Fail the WebSocket Connection_.
        # https://datatracker.ietf.org/doc/html/rfc7692#section-6.1
        if message and compressor and not is_control_frame:
            message = compressor.compress(message)
            message += compressor.flush(zlib.Z_SYNC_FLUSH)
            assert message[-4:] == b"\x00\x00\xff\xff"
            message = message[:-4]
            compress_bit = 1 << 6

        length = len(message)
        if not length:
            # no point masking empty data
            masked = False
        if control_code:
            if control_code not in (8, 9, 0xA):
                raise ProtocolError('Unknown control opcode.')
            if continuation or not final:
                raise ProtocolError('Control frame cannot be a fragment.')
            if length > 125:
                raise ProtocolError('Control frame data too large (>125).')
            header = struct.pack('!B', control_code | 1 << 7)
        else:
            opcode = 0 if continuation else ((1 if is_text else 2) | compress_bit)
            header = struct.pack('!B', opcode | (1 << 7 if final else 0))
        lengthdata = 1 << 7 if masked else 0
        if length > 65535:
            lengthdata = struct.pack('!BQ', lengthdata | 127, length)
        elif length > 125:
            lengthdata = struct.pack('!BH', lengthdata | 126, length)
        else:
            lengthdata = struct.pack('!B', lengthdata | length)
        if masked:
            # NOTE: RFC6455 states:
            # A server MUST NOT mask any frames that it sends to the client
            rand = Random(time.time())
            mask = [rand.getrandbits(8) for _ in range(4)]
            message = RFC6455WebSocket._apply_mask(message, mask, length)
            maskdata = struct.pack('!BBBB', *mask)
        else:
            maskdata = b''

        return b''.join((header, lengthdata, maskdata, message))

    def wait(self):
        for i in self.iterator:
            return i

    def _send(self, frame):
        self._sendlock.acquire()
        try:
            self.socket.sendall(frame)
        finally:
            self._sendlock.release()

    def send(self, message, **kw):
        kw['masked'] = self.client
        payload = self._pack_message(message, **kw)
        self._send(payload)

    def _send_closing_frame(self, ignore_send_errors=False, close_data=None):
        if self.version in (8, 13) and not self.websocket_closed:
            if close_data is not None:
                status, msg = close_data
                if isinstance(msg, str):
                    msg = msg.encode('utf-8')
                data = struct.pack('!H', status) + msg
            else:
                data = ''
            try:
                self.send(data, control_code=8)
            except OSError:
                # Sometimes, like when the remote side cuts off the connection,
                # we don't care about this.
                if not ignore_send_errors:  # pragma NO COVER
                    raise
            self.websocket_closed = True

    def close(self, close_data=None):
        """Forcibly close the websocket; generally it is preferable to
        return from the handler method."""
        try:
            self._send_closing_frame(close_data=close_data, ignore_send_errors=True)
            self.socket.shutdown(socket.SHUT_WR)
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e))
        finally:
            self.socket.close()
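
Tying the pieces together, a minimal echo server using the classes above (the listening address and port are arbitrary; the decorator usage mirrors the WebSocketWSGI docstring):

import eventlet
from eventlet import websocket, wsgi

@websocket.WebSocketWSGI
def echo(ws):
    while True:
        msg = ws.wait()     # returns None once the client has closed
        if msg is None:
            break
        ws.send(msg)        # echo the frame back

if __name__ == '__main__':
    # wsgi.server blocks, handling each connection in its own green thread.
    wsgi.server(eventlet.listen(('127.0.0.1', 7000)), echo)
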
1102
venv/lib/python3.12/site-packages/eventlet/wsgi.py
Normal file
1102
venv/lib/python3.12/site-packages/eventlet/wsgi.py
Normal file
File diff suppressed because it is too large
130
venv/lib/python3.12/site-packages/eventlet/zipkin/README.rst
Normal file
130
venv/lib/python3.12/site-packages/eventlet/zipkin/README.rst
Normal file
@ -0,0 +1,130 @@
|
||||
eventlet.zipkin
|
||||
===============
|
||||
|
||||
`Zipkin <http://twitter.github.io/zipkin/>`_ is a distributed tracing system developed at Twitter.
|
||||
This package provides a WSGI application using eventlet
|
||||
with tracing facility that complies with Zipkin.
|
||||
|
||||
Why use it?
|
||||
From the http://twitter.github.io/zipkin/:
|
||||
|
||||
"Collecting traces helps developers gain deeper knowledge about how
|
||||
certain requests perform in a distributed system. Let's say we're having
|
||||
problems with user requests timing out. We can look up traced requests
|
||||
that timed out and display it in the web UI. We'll be able to quickly
|
||||
find the service responsible for adding the unexpected response time. If
|
||||
the service has been annotated adequately we can also find out where in
|
||||
that service the issue is happening."
|
||||
|
||||
|
||||
Screenshot
|
||||
----------
|
||||
|
||||
Zipkin web ui screenshots obtained when applying this module to
|
||||
`OpenStack swift <https://github.com/openstack/swift>`_ are in example/.
|
||||
|
||||
|
||||
Requirement
|
||||
-----------
|
||||
|
||||
A eventlet.zipkin needs `python scribe client <https://pypi.python.org/pypi/facebook-scribe/>`_
|
||||
and `thrift <https://thrift.apache.org/>`_ (>=0.9),
|
||||
because the zipkin collector speaks `scribe <https://github.com/facebookarchive/scribe>`_ protocol.
|
||||
Below command will install both scribe client and thrift.
|
||||
|
||||
Install facebook-scribe:
|
||||
|
||||
::
|
||||
|
||||
pip install facebook-scribe
|
||||
|
||||
**Python**: ``2.7`` (Because the current Python Thrift release doesn't support Python 3)
|
||||
|
||||
|
||||
How to use
|
||||
----------
|
||||
|
||||
Add tracing facility to your application
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
Apply the monkey patch before you start wsgi server.
|
||||
|
||||
.. code:: python
|
||||
|
||||
# Add only 2 lines to your code
|
||||
from eventlet.zipkin import patcher
|
||||
patcher.enable_trace_patch()
|
||||
|
||||
# existing code
|
||||
from eventlet import wsgi
|
||||
wsgi.server(sock, app)
|
||||
|
||||
You can pass some parameters to ``enable_trace_patch()``
|
||||
|
||||
* host: Scribe daemon IP address (default: '127.0.0.1')
|
||||
* port: Scribe daemon port (default: 9410)
|
||||
* trace_app_log: A Boolean indicating if the tracer will trace application log together or not. This facility assume that your application uses python standard logging library. (default: False)
|
||||
* sampling_rate: A Float value (0.0~1.0) that indicates the tracing frequency. If you specify 1.0, all requests are traced and sent to Zipkin collecotr. If you specify 0.1, only 1/10 requests are traced. (defult: 1.0)
|
||||
|
||||
|
||||
(Option) Annotation API
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
If you want to record additional information,
|
||||
you can use below API from anywhere in your code.
|
||||
|
||||
.. code:: python
|
||||
|
||||
from eventlet.zipkin import api
|
||||
|
||||
api.put_annotation('Cache miss for %s' % request)
|
||||
api.put_key_value('key', 'value')
|
||||
|
||||
|
||||
|
||||
|
||||
Zipkin simple setup
|
||||
-------------------
|
||||
|
||||
::
|
||||
|
||||
$ git clone https://github.com/twitter/zipkin.git
|
||||
$ cd zipkin
|
||||
# Open 3 terminals
|
||||
(terminal1) $ bin/collector
|
||||
(terminal2) $ bin/query
|
||||
(terminal3) $ bin/web
|
||||
|
||||
Access http://localhost:8080 from your browser.
|
||||
|
||||
|
||||
(Optional) fluentd
------------------
If you want to buffer the tracing data for performance,
the `fluentd scribe plugin <http://docs.fluentd.org/articles/in_scribe>`_ is available.
Since the ``out_scribe`` plugin extends the `Buffer Plugin <http://docs.fluentd.org/articles/buffer-plugin-overview>`_,
you can customize the buffering parameters in the usual fluentd manner.
The scribe plugin is included in td-agent by default.

Sample: ``/etc/td-agent/td-agent.conf``

::

    # in_scribe
    <source>
      type scribe
      port 9999
    </source>

    # out_scribe
    <match zipkin.**>
      type scribe
      host Zipkin_collector_IP
      port 9410
      flush_interval 60s
      buffer_chunk_limit 256m
    </match>

| And you need to specify ``patcher.enable_trace_patch(port=9999)`` so that traces go to in_scribe.
| In this case, trace data flows as below.
| Your application => Local fluentd in_scribe (9999) => Local fluentd out_scribe <buffering> =====> Remote zipkin collector (9410)
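
In code, that is just the ``port`` argument (a minimal sketch):

.. code:: python

    from eventlet.zipkin import patcher

    # Send spans to the local fluentd in_scribe port; fluentd buffers
    # them and forwards to the remote Zipkin collector on port 9410.
    patcher.enable_trace_patch(port=9999)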
@ -0,0 +1,8 @@
_thrift
========

* This directory is auto-generated by the Thrift Compiler using
  https://github.com/twitter/zipkin/blob/master/zipkin-thrift/src/main/thrift/com/twitter/zipkin/zipkinCore.thrift

* Do not modify this directory.

@ -0,0 +1,55 @@
# Copyright 2012 Twitter Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
namespace java com.twitter.zipkin.gen
namespace rb Zipkin

//************** Collection related structs **************

// these are the annotations we always expect to find in a span
const string CLIENT_SEND = "cs"
const string CLIENT_RECV = "cr"
const string SERVER_SEND = "ss"
const string SERVER_RECV = "sr"

// this represents a host and port in a network
struct Endpoint {
  1: i32 ipv4,
  2: i16 port  // beware that this will give us negative ports. some conversion needed
  3: string service_name  // which service did this operation happen on?
}

// some event took place, either one by the framework or by the user
struct Annotation {
  1: i64 timestamp  // microseconds from epoch
  2: string value  // what happened at the timestamp?
  3: optional Endpoint host  // host this happened on
}

enum AnnotationType { BOOL, BYTES, I16, I32, I64, DOUBLE, STRING }

struct BinaryAnnotation {
  1: string key,
  2: binary value,
  3: AnnotationType annotation_type,
  4: optional Endpoint host
}

struct Span {
  1: i64 trace_id  // unique trace id, use for all spans in trace
  3: string name,  // span name, rpc method for example
  4: i64 id,  // unique span id, only used for this span
  5: optional i64 parent_id,  // parent span id
  6: list<Annotation> annotations,  // list of all annotations/events that occured
  8: list<BinaryAnnotation> binary_annotations  // any binary annotations
}
@ -0,0 +1 @@
__all__ = ['ttypes', 'constants']
@ -0,0 +1,14 @@
#
# Autogenerated by Thrift Compiler (0.8.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
#

from thrift.Thrift import TType, TMessageType, TException
from ttypes import *

CLIENT_SEND = "cs"
CLIENT_RECV = "cr"
SERVER_SEND = "ss"
SERVER_RECV = "sr"
@ -0,0 +1,452 @@
#
# Autogenerated by Thrift Compiler (0.8.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
#

from thrift.Thrift import TType, TMessageType, TException

from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
  from thrift.protocol import fastbinary
except:
  fastbinary = None


class AnnotationType:
  BOOL = 0
  BYTES = 1
  I16 = 2
  I32 = 3
  I64 = 4
  DOUBLE = 5
  STRING = 6

  _VALUES_TO_NAMES = {
    0: "BOOL",
    1: "BYTES",
    2: "I16",
    3: "I32",
    4: "I64",
    5: "DOUBLE",
    6: "STRING",
  }

  _NAMES_TO_VALUES = {
    "BOOL": 0,
    "BYTES": 1,
    "I16": 2,
    "I32": 3,
    "I64": 4,
    "DOUBLE": 5,
    "STRING": 6,
  }


class Endpoint:
  """
  Attributes:
   - ipv4
   - port
   - service_name
  """

  thrift_spec = (
    None, # 0
    (1, TType.I32, 'ipv4', None, None, ), # 1
    (2, TType.I16, 'port', None, None, ), # 2
    (3, TType.STRING, 'service_name', None, None, ), # 3
  )

  def __init__(self, ipv4=None, port=None, service_name=None,):
    self.ipv4 = ipv4
    self.port = port
    self.service_name = service_name

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.ipv4 = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I16:
          self.port = iprot.readI16();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.service_name = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Endpoint')
    if self.ipv4 is not None:
      oprot.writeFieldBegin('ipv4', TType.I32, 1)
      oprot.writeI32(self.ipv4)
      oprot.writeFieldEnd()
    if self.port is not None:
      oprot.writeFieldBegin('port', TType.I16, 2)
      oprot.writeI16(self.port)
      oprot.writeFieldEnd()
    if self.service_name is not None:
      oprot.writeFieldBegin('service_name', TType.STRING, 3)
      oprot.writeString(self.service_name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return


  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class Annotation:
  """
  Attributes:
   - timestamp
   - value
   - host
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'timestamp', None, None, ), # 1
    (2, TType.STRING, 'value', None, None, ), # 2
    (3, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 3
  )

  def __init__(self, timestamp=None, value=None, host=None,):
    self.timestamp = timestamp
    self.value = value
    self.host = host

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.timestamp = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.value = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.host = Endpoint()
          self.host.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Annotation')
    if self.timestamp is not None:
      oprot.writeFieldBegin('timestamp', TType.I64, 1)
      oprot.writeI64(self.timestamp)
      oprot.writeFieldEnd()
    if self.value is not None:
      oprot.writeFieldBegin('value', TType.STRING, 2)
      oprot.writeString(self.value)
      oprot.writeFieldEnd()
    if self.host is not None:
      oprot.writeFieldBegin('host', TType.STRUCT, 3)
      self.host.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return


  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class BinaryAnnotation:
  """
  Attributes:
   - key
   - value
   - annotation_type
   - host
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'key', None, None, ), # 1
    (2, TType.STRING, 'value', None, None, ), # 2
    (3, TType.I32, 'annotation_type', None, None, ), # 3
    (4, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 4
  )

  def __init__(self, key=None, value=None, annotation_type=None, host=None,):
    self.key = key
    self.value = value
    self.annotation_type = annotation_type
    self.host = host

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.key = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.value = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.annotation_type = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRUCT:
          self.host = Endpoint()
          self.host.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('BinaryAnnotation')
    if self.key is not None:
      oprot.writeFieldBegin('key', TType.STRING, 1)
      oprot.writeString(self.key)
      oprot.writeFieldEnd()
    if self.value is not None:
      oprot.writeFieldBegin('value', TType.STRING, 2)
      oprot.writeString(self.value)
      oprot.writeFieldEnd()
    if self.annotation_type is not None:
      oprot.writeFieldBegin('annotation_type', TType.I32, 3)
      oprot.writeI32(self.annotation_type)
      oprot.writeFieldEnd()
    if self.host is not None:
      oprot.writeFieldBegin('host', TType.STRUCT, 4)
      self.host.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return


  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class Span:
  """
  Attributes:
   - trace_id
   - name
   - id
   - parent_id
   - annotations
   - binary_annotations
  """

  thrift_spec = (
    None, # 0
    (1, TType.I64, 'trace_id', None, None, ), # 1
    None, # 2
    (3, TType.STRING, 'name', None, None, ), # 3
    (4, TType.I64, 'id', None, None, ), # 4
    (5, TType.I64, 'parent_id', None, None, ), # 5
    (6, TType.LIST, 'annotations', (TType.STRUCT,(Annotation, Annotation.thrift_spec)), None, ), # 6
    None, # 7
    (8, TType.LIST, 'binary_annotations', (TType.STRUCT,(BinaryAnnotation, BinaryAnnotation.thrift_spec)), None, ), # 8
  )

  def __init__(self, trace_id=None, name=None, id=None, parent_id=None, annotations=None, binary_annotations=None,):
    self.trace_id = trace_id
    self.name = name
    self.id = id
    self.parent_id = parent_id
    self.annotations = annotations
    self.binary_annotations = binary_annotations

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I64:
          self.trace_id = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.id = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I64:
          self.parent_id = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.LIST:
          self.annotations = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in xrange(_size0):
            _elem5 = Annotation()
            _elem5.read(iprot)
            self.annotations.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.LIST:
          self.binary_annotations = []
          (_etype9, _size6) = iprot.readListBegin()
          for _i10 in xrange(_size6):
            _elem11 = BinaryAnnotation()
            _elem11.read(iprot)
            self.binary_annotations.append(_elem11)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Span')
    if self.trace_id is not None:
      oprot.writeFieldBegin('trace_id', TType.I64, 1)
      oprot.writeI64(self.trace_id)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 3)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.I64, 4)
      oprot.writeI64(self.id)
      oprot.writeFieldEnd()
    if self.parent_id is not None:
      oprot.writeFieldBegin('parent_id', TType.I64, 5)
      oprot.writeI64(self.parent_id)
      oprot.writeFieldEnd()
    if self.annotations is not None:
      oprot.writeFieldBegin('annotations', TType.LIST, 6)
      oprot.writeListBegin(TType.STRUCT, len(self.annotations))
      for iter12 in self.annotations:
        iter12.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.binary_annotations is not None:
      oprot.writeFieldBegin('binary_annotations', TType.LIST, 8)
      oprot.writeListBegin(TType.STRUCT, len(self.binary_annotations))
      for iter13 in self.binary_annotations:
        iter13.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return


  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
187
venv/lib/python3.12/site-packages/eventlet/zipkin/api.py
Normal file
@ -0,0 +1,187 @@
import os
import sys
import time
import struct
import socket
import random

from eventlet.green import threading
from eventlet.zipkin._thrift.zipkinCore import ttypes
from eventlet.zipkin._thrift.zipkinCore.constants import SERVER_SEND


client = None
_tls = threading.local()  # thread local storage


def put_annotation(msg, endpoint=None):
    """ This is the annotation API.
    You can add your own annotation from anywhere in your code.
    The annotation is recorded with a timestamp automatically.
    e.g.) put_annotation('cache hit for %s' % request)

    :param msg: String message
    :param endpoint: host info
    """
    if is_sample():
        a = ZipkinDataBuilder.build_annotation(msg, endpoint)
        trace_data = get_trace_data()
        trace_data.add_annotation(a)


def put_key_value(key, value, endpoint=None):
    """ This is the binary annotation API.
    You can add your own key-value extra information from anywhere
    in your code. A key-value doesn't have a time component.
    e.g.) put_key_value('http.uri', '/hoge/index.html')

    :param key: String
    :param value: String
    :param endpoint: host info
    """
    if is_sample():
        b = ZipkinDataBuilder.build_binary_annotation(key, value, endpoint)
        trace_data = get_trace_data()
        trace_data.add_binary_annotation(b)


def is_tracing():
    """ Return whether the current thread is tracing or not """
    return hasattr(_tls, 'trace_data')


def is_sample():
    """ Return whether trace information should be recorded
    for the request or not
    """
    return is_tracing() and _tls.trace_data.sampled


def get_trace_data():
    if is_tracing():
        return _tls.trace_data


def set_trace_data(trace_data):
    _tls.trace_data = trace_data


def init_trace_data():
    if is_tracing():
        del _tls.trace_data


def _uniq_id():
    """
    Create a random 64-bit signed integer appropriate
    for use as trace and span IDs.
    XXX: By experimentation zipkin has trouble recording traces with ids
    larger than (2 ** 56) - 1
    """
    return random.randint(0, (2 ** 56) - 1)


def generate_trace_id():
    return _uniq_id()


def generate_span_id():
    return _uniq_id()


class TraceData:

    END_ANNOTATION = SERVER_SEND

    def __init__(self, name, trace_id, span_id, parent_id, sampled, endpoint):
        """
        :param name: RPC name (String)
        :param trace_id: int
        :param span_id: int
        :param parent_id: int or None
        :param sampled: lets the downstream servers know
            if I should record trace data for the request (bool)
        :param endpoint: zipkin._thrift.zipkinCore.ttypes.EndPoint
        """
        self.name = name
        self.trace_id = trace_id
        self.span_id = span_id
        self.parent_id = parent_id
        self.sampled = sampled
        self.endpoint = endpoint
        self.annotations = []
        self.bannotations = []
        self._done = False

    def add_annotation(self, annotation):
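        # Annotations accumulate until the span is closed: the
        # SERVER_SEND ("ss") annotation marks the end of the span and
        # triggers a flush to the collector; annotations arriving after
        # that are dropped (self._done).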
        if annotation.host is None:
            annotation.host = self.endpoint
        if not self._done:
            self.annotations.append(annotation)
            if annotation.value == self.END_ANNOTATION:
                self.flush()

    def add_binary_annotation(self, bannotation):
        if bannotation.host is None:
            bannotation.host = self.endpoint
        if not self._done:
            self.bannotations.append(bannotation)

    def flush(self):
        span = ZipkinDataBuilder.build_span(name=self.name,
                                            trace_id=self.trace_id,
                                            span_id=self.span_id,
                                            parent_id=self.parent_id,
                                            annotations=self.annotations,
                                            bannotations=self.bannotations)
        client.send_to_collector(span)
        self.annotations = []
        self.bannotations = []
        self._done = True


class ZipkinDataBuilder:
    @staticmethod
    def build_span(name, trace_id, span_id, parent_id,
                   annotations, bannotations):
        return ttypes.Span(
            name=name,
            trace_id=trace_id,
            id=span_id,
            parent_id=parent_id,
            annotations=annotations,
            binary_annotations=bannotations
        )

    @staticmethod
    def build_annotation(value, endpoint=None):
        if isinstance(value, str):
            value = value.encode('utf-8')
        assert isinstance(value, bytes)
        return ttypes.Annotation(time.time() * 1000 * 1000,
                                 value, endpoint)

    @staticmethod
    def build_binary_annotation(key, value, endpoint=None):
        annotation_type = ttypes.AnnotationType.STRING
        return ttypes.BinaryAnnotation(key, value, annotation_type, endpoint)

    @staticmethod
    def build_endpoint(ipv4=None, port=None, service_name=None):
        if ipv4 is not None:
            ipv4 = ZipkinDataBuilder._ipv4_to_int(ipv4)
        if service_name is None:
            service_name = ZipkinDataBuilder._get_script_name()
        return ttypes.Endpoint(
            ipv4=ipv4,
            port=port,
            service_name=service_name
        )

    @staticmethod
    def _ipv4_to_int(ipv4):
        return struct.unpack('!i', socket.inet_aton(ipv4))[0]

    @staticmethod
    def _get_script_name():
        return os.path.basename(sys.argv[0])
56
venv/lib/python3.12/site-packages/eventlet/zipkin/client.py
Normal file
@ -0,0 +1,56 @@
import base64
import warnings

from scribe import scribe
from thrift.transport import TTransport, TSocket
from thrift.protocol import TBinaryProtocol

from eventlet import GreenPile


CATEGORY = 'zipkin'


class ZipkinClient:

    def __init__(self, host='127.0.0.1', port=9410):
        """
        :param host: zipkin collector IP address (default '127.0.0.1')
        :param port: zipkin collector port (default 9410)
        """
        self.host = host
        self.port = port
        self.pile = GreenPile(1)
        self._connect()

    def _connect(self):
        socket = TSocket.TSocket(self.host, self.port)
        self.transport = TTransport.TFramedTransport(socket)
        protocol = TBinaryProtocol.TBinaryProtocol(self.transport,
                                                   False, False)
        self.scribe_client = scribe.Client(protocol)
        try:
            self.transport.open()
        except TTransport.TTransportException as e:
            warnings.warn(e.message)

    def _build_message(self, thrift_obj):
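        # Serialize the span with the binary protocol into a memory
        # buffer and base64-encode it; the base64 thrift payload is the
        # format the Zipkin collector expects inside a scribe LogEntry.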
        trans = TTransport.TMemoryBuffer()
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(trans=trans)
        thrift_obj.write(protocol)
        return base64.b64encode(trans.getvalue())

    def send_to_collector(self, span):
        self.pile.spawn(self._send, span)

    def _send(self, span):
        log_entry = scribe.LogEntry(CATEGORY, self._build_message(span))
        try:
            self.scribe_client.Log([log_entry])
        except Exception as e:
            msg = 'ZipkinClient send error %s' % str(e)
            warnings.warn(msg)
            self._connect()

    def close(self):
        self.transport.close()
BIN
venv/lib/python3.12/site-packages/eventlet/zipkin/example/ex1.png
Executable file
Binary file not shown.
BIN
venv/lib/python3.12/site-packages/eventlet/zipkin/example/ex2.png
Executable file
Binary file not shown.
BIN
venv/lib/python3.12/site-packages/eventlet/zipkin/example/ex3.png
Executable file
Binary file not shown.
@ -0,0 +1,33 @@
from eventlet import greenthread

from eventlet.zipkin import api


__original_init__ = greenthread.GreenThread.__init__
__original_main__ = greenthread.GreenThread.main


def _patched__init(self, parent):
    # parent thread saves current TraceData from tls to self
    if api.is_tracing():
        self.trace_data = api.get_trace_data()

    __original_init__(self, parent)


def _patched_main(self, function, args, kwargs):
    # child thread inherits TraceData
    if hasattr(self, 'trace_data'):
        api.set_trace_data(self.trace_data)

    __original_main__(self, function, args, kwargs)


def patch():
    greenthread.GreenThread.__init__ = _patched__init
    greenthread.GreenThread.main = _patched_main


def unpatch():
    greenthread.GreenThread.__init__ = __original_init__
    greenthread.GreenThread.main = __original_main__
29
venv/lib/python3.12/site-packages/eventlet/zipkin/http.py
Normal file
@ -0,0 +1,29 @@
import warnings

from eventlet.green import httplib
from eventlet.zipkin import api


# see https://twitter.github.io/zipkin/Instrumenting.html
HDR_TRACE_ID = 'X-B3-TraceId'
HDR_SPAN_ID = 'X-B3-SpanId'
HDR_PARENT_SPAN_ID = 'X-B3-ParentSpanId'
HDR_SAMPLED = 'X-B3-Sampled'


def patch():
    warnings.warn("Since the current Python thrift release "
                  "doesn't support Python 3, eventlet.zipkin.http "
                  "doesn't support Python 3 (http.client) either")


def unpatch():
    pass


def hex_str(n):
    """
    Thrift uses a binary representation of trace and span ids;
    HTTP headers use a hexadecimal representation of the same
    """
    return '%0.16x' % (n,)
19
venv/lib/python3.12/site-packages/eventlet/zipkin/log.py
Normal file
@ -0,0 +1,19 @@
import logging

from eventlet.zipkin import api


__original_handle__ = logging.Logger.handle


def _patched_handle(self, record):
    __original_handle__(self, record)
    api.put_annotation(record.getMessage())


def patch():
    logging.Logger.handle = _patched_handle


def unpatch():
    logging.Logger.handle = __original_handle__
41
venv/lib/python3.12/site-packages/eventlet/zipkin/patcher.py
Normal file
@ -0,0 +1,41 @@
from eventlet.zipkin import http
from eventlet.zipkin import wsgi
from eventlet.zipkin import greenthread
from eventlet.zipkin import log
from eventlet.zipkin import api
from eventlet.zipkin.client import ZipkinClient


def enable_trace_patch(host='127.0.0.1', port=9410,
                       trace_app_log=False, sampling_rate=1.0):
    """ Apply monkey patch to trace your WSGI application.

    :param host: Scribe daemon IP address (default: '127.0.0.1')
    :param port: Scribe daemon port (default: 9410)
    :param trace_app_log: A boolean indicating whether the tracer also
        traces the application log. This facility assumes that
        your application uses the Python standard logging library.
        (default: False)
    :param sampling_rate: A float value (0.0~1.0) that indicates
        the tracing frequency. If you specify 1.0, all requests
        are traced (and sent to the Zipkin collector).
        If you specify 0.1, only 1/10 of requests are traced.
        (default: 1.0)
    """
    api.client = ZipkinClient(host, port)

    # monkey patch for adding tracing facility
    wsgi.patch(sampling_rate)
    http.patch()
    greenthread.patch()

    # monkey patch for capturing application log
    if trace_app_log:
        log.patch()


def disable_trace_patch():
    http.unpatch()
    wsgi.unpatch()
    greenthread.unpatch()
    log.unpatch()
    api.client.close()
78
venv/lib/python3.12/site-packages/eventlet/zipkin/wsgi.py
Normal file
@ -0,0 +1,78 @@
import random

from eventlet import wsgi
from eventlet.zipkin import api
from eventlet.zipkin._thrift.zipkinCore.constants import \
    SERVER_RECV, SERVER_SEND
from eventlet.zipkin.http import \
    HDR_TRACE_ID, HDR_SPAN_ID, HDR_PARENT_SPAN_ID, HDR_SAMPLED


_sampler = None
__original_handle_one_response__ = wsgi.HttpProtocol.handle_one_response


def _patched_handle_one_response(self):
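    # Reconstruct the trace context from the incoming X-B3-* headers
    # (or start a new, possibly sampled trace when none is present),
    # then record SERVER_RECV/SERVER_SEND around the original handler.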
    api.init_trace_data()
    trace_id = int_or_none(self.headers.getheader(HDR_TRACE_ID))
    span_id = int_or_none(self.headers.getheader(HDR_SPAN_ID))
    parent_id = int_or_none(self.headers.getheader(HDR_PARENT_SPAN_ID))
    sampled = bool_or_none(self.headers.getheader(HDR_SAMPLED))
    if trace_id is None:  # front-end server
        trace_id = span_id = api.generate_trace_id()
        parent_id = None
        sampled = _sampler.sampling()
    ip, port = self.request.getsockname()[:2]
    ep = api.ZipkinDataBuilder.build_endpoint(ip, port)
    trace_data = api.TraceData(name=self.command,
                               trace_id=trace_id,
                               span_id=span_id,
                               parent_id=parent_id,
                               sampled=sampled,
                               endpoint=ep)
    api.set_trace_data(trace_data)
    api.put_annotation(SERVER_RECV)
    api.put_key_value('http.uri', self.path)

    __original_handle_one_response__(self)

    if api.is_sample():
        api.put_annotation(SERVER_SEND)


class Sampler:
    def __init__(self, sampling_rate):
        self.sampling_rate = sampling_rate

    def sampling(self):
        # avoid generating unneeded random numbers
        if self.sampling_rate == 1.0:
            return True
        r = random.random()
        if r < self.sampling_rate:
            return True
        return False


def int_or_none(val):
    if val is None:
        return None
    return int(val, 16)


def bool_or_none(val):
    if val == '1':
        return True
    if val == '0':
        return False
    return None


def patch(sampling_rate):
    global _sampler
    _sampler = Sampler(sampling_rate)
    wsgi.HttpProtocol.handle_one_response = _patched_handle_one_response


def unpatch():
    wsgi.HttpProtocol.handle_one_response = __original_handle_one_response__