commit bc9415586e
parent 40aade2d8e
2024-11-29 18:15:30 +00:00
5298 changed files with 1938676 additions and 80 deletions

@@ -0,0 +1,15 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import SocketServer
patcher.inject(
'http.server',
globals(),
('socket', socket),
('SocketServer', SocketServer),
('socketserver', SocketServer))
del patcher
if __name__ == '__main__':
test()

@@ -0,0 +1,17 @@
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import SimpleHTTPServer
from eventlet.green import urllib
from eventlet.green import select
test = None # bind prior to patcher.inject to silence pyflakes warning below
patcher.inject(
'http.server',
globals(),
('urllib', urllib),
('select', select))
del patcher
if __name__ == '__main__':
test() # pyflakes false alarm here unless test = None above

@@ -0,0 +1,40 @@
__MySQLdb = __import__('MySQLdb')
__all__ = __MySQLdb.__all__
__patched__ = ["connect", "Connect", 'Connection', 'connections']
from eventlet.patcher import slurp_properties
slurp_properties(
__MySQLdb, globals(),
ignore=__patched__, srckeys=dir(__MySQLdb))
from eventlet import tpool
__orig_connections = __import__('MySQLdb.connections').connections
def Connection(*args, **kw):
conn = tpool.execute(__orig_connections.Connection, *args, **kw)
return tpool.Proxy(conn, autowrap_names=('cursor',))
connect = Connect = Connection
# replicate the MySQLdb.connections module but with a tpooled Connection factory
class MySQLdbConnectionsModule:
pass
connections = MySQLdbConnectionsModule()
for var in dir(__orig_connections):
if not var.startswith('__'):
setattr(connections, var, getattr(__orig_connections, var))
connections.Connection = Connection
cursors = __import__('MySQLdb.cursors').cursors
converters = __import__('MySQLdb.converters').converters
# TODO support instantiating cursors.FooCursor objects directly
# TODO though this is a low priority, it would be nice if we supported
# subclassing eventlet.green.MySQLdb.connections.Connection
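An illustrative usage sketch for the module above (hedged: assumes MySQLdb is installed and a reachable server; the credentials are hypothetical). connect() runs the real constructor in the OS threadpool and returns a proxy that autowraps cursors:

from eventlet.green import MySQLdb

conn = MySQLdb.connect(host='localhost', user='test', passwd='test', db='test')  # hypothetical credentials
cur = conn.cursor()            # cursor() result is autowrapped in a tpool.Proxy
cur.execute("SELECT VERSION()")
print(cur.fetchone())          # the blocking fetch runs in the threadpool
conn.close()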

@@ -0,0 +1,125 @@
from OpenSSL import SSL as orig_SSL
from OpenSSL.SSL import *
from eventlet.support import get_errno
from eventlet import greenio
from eventlet.hubs import trampoline
import socket
class GreenConnection(greenio.GreenSocket):
""" Nonblocking wrapper for SSL.Connection objects.
"""
def __init__(self, ctx, sock=None):
if sock is not None:
fd = orig_SSL.Connection(ctx, sock)
else:
# if we're given a Connection object directly, use it;
# this is used in the inherited accept() method
fd = ctx
super(ConnectionType, self).__init__(fd)
def do_handshake(self):
""" Perform an SSL handshake (usually called after renegotiate or one of
set_accept_state or set_accept_state). This can raise the same exceptions as
send and recv. """
if self.act_non_blocking:
return self.fd.do_handshake()
while True:
try:
return self.fd.do_handshake()
except WantReadError:
trampoline(self.fd.fileno(),
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except WantWriteError:
trampoline(self.fd.fileno(),
write=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
def dup(self):
raise NotImplementedError("Dup not supported on SSL sockets")
def makefile(self, mode='r', bufsize=-1):
raise NotImplementedError("Makefile not supported on SSL sockets")
def read(self, size):
"""Works like a blocking call to SSL_read(), whose behavior is
described here: http://www.openssl.org/docs/ssl/SSL_read.html"""
if self.act_non_blocking:
return self.fd.read(size)
while True:
try:
return self.fd.read(size)
except WantReadError:
trampoline(self.fd.fileno(),
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except WantWriteError:
trampoline(self.fd.fileno(),
write=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except SysCallError as e:
if get_errno(e) == -1 or get_errno(e) > 0:
return ''
recv = read
def write(self, data):
"""Works like a blocking call to SSL_write(), whose behavior is
described here: http://www.openssl.org/docs/ssl/SSL_write.html"""
if not data:
return 0 # calling SSL_write() with 0 bytes to be sent is undefined
if self.act_non_blocking:
return self.fd.write(data)
while True:
try:
return self.fd.write(data)
except WantReadError:
trampoline(self.fd.fileno(),
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except WantWriteError:
trampoline(self.fd.fileno(),
write=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
send = write
def sendall(self, data):
"""Send "all" data on the connection. This calls send() repeatedly until
all data is sent. If an error occurs, it's impossible to tell how much data
has been sent.
No return value."""
tail = self.send(data)
while tail < len(data):
tail += self.send(data[tail:])
def shutdown(self):
if self.act_non_blocking:
return self.fd.shutdown()
while True:
try:
return self.fd.shutdown()
except WantReadError:
trampoline(self.fd.fileno(),
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except WantWriteError:
trampoline(self.fd.fileno(),
write=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
Connection = ConnectionType = GreenConnection
del greenio
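A hedged client sketch for GreenConnection (assumptions: a recent pyOpenSSL that provides TLS_METHOD, and an illustrative host; attribute access such as set_connect_state() is delegated to the wrapped SSL.Connection). Handshake and I/O retry via trampoline instead of blocking the whole thread:

import socket
from OpenSSL import SSL
from eventlet.green.OpenSSL.SSL import GreenConnection

ctx = SSL.Context(SSL.TLS_METHOD)          # assumption: pyOpenSSL >= 20
sock = socket.create_connection(('example.com', 443))
conn = GreenConnection(ctx, sock)
conn.set_connect_state()                   # delegated to the underlying SSL.Connection
conn.do_handshake()                        # WantRead/WantWrite -> trampoline to the hub
conn.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
print(conn.read(1024))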

@@ -0,0 +1,9 @@
from . import crypto
from . import SSL
try:
# pyopenssl tsafe module was deprecated and removed in v20.0.0
# https://github.com/pyca/pyopenssl/pull/913
from . import tsafe
except ImportError:
pass
from .version import __version__

@@ -0,0 +1 @@
from OpenSSL.crypto import *

@@ -0,0 +1 @@
from OpenSSL.tsafe import *

@@ -0,0 +1 @@
from OpenSSL.version import __version__, __doc__

@@ -0,0 +1,33 @@
from eventlet import queue
__all__ = ['Empty', 'Full', 'LifoQueue', 'PriorityQueue', 'Queue']
__patched__ = ['LifoQueue', 'PriorityQueue', 'Queue']
# these classes exist to paper over the major operational difference between
# eventlet.queue.Queue and the stdlib equivalents: the stdlib treats
# maxsize=0 as unbounded, while eventlet.queue expects maxsize=None for that
class Queue(queue.Queue):
def __init__(self, maxsize=0):
if maxsize == 0:
maxsize = None
super().__init__(maxsize)
class PriorityQueue(queue.PriorityQueue):
def __init__(self, maxsize=0):
if maxsize == 0:
maxsize = None
super().__init__(maxsize)
class LifoQueue(queue.LifoQueue):
def __init__(self, maxsize=0):
if maxsize == 0:
maxsize = None
super().__init__(maxsize)
Empty = queue.Empty
Full = queue.Full
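A quick illustration of that translation (assuming this module is importable as eventlet.green.Queue): maxsize=0 becomes None internally, so the stdlib's "zero means unbounded" convention is preserved:

from eventlet.green import Queue as green_queue

q = green_queue.Queue(0)      # translated to maxsize=None internally
for i in range(3):
    q.put_nowait(i)           # never raises Full: the queue is unbounded
assert q.get() == 0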

@@ -0,0 +1,13 @@
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import urllib
patcher.inject(
'http.server',
globals(),
('urllib', urllib))
del patcher
if __name__ == '__main__':
test()

@@ -0,0 +1,14 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import select
from eventlet.green import threading
patcher.inject(
'socketserver',
globals(),
('socket', socket),
('select', select),
('threading', threading))
# QQQ ForkingMixIn should be fixed to use green waitpid?

@@ -0,0 +1 @@
# this package contains modules from the standard library converted to use eventlet

@@ -0,0 +1,33 @@
__socket = __import__('socket')
__all__ = __socket.__all__
__patched__ = ['fromfd', 'socketpair', 'ssl', 'socket', 'timeout']
import eventlet.patcher
eventlet.patcher.slurp_properties(__socket, globals(), ignore=__patched__, srckeys=dir(__socket))
os = __import__('os')
import sys
from eventlet import greenio
socket = greenio.GreenSocket
_GLOBAL_DEFAULT_TIMEOUT = greenio._GLOBAL_DEFAULT_TIMEOUT
timeout = greenio.socket_timeout
try:
__original_fromfd__ = __socket.fromfd
def fromfd(*args):
return socket(__original_fromfd__(*args))
except AttributeError:
pass
try:
__original_socketpair__ = __socket.socketpair
def socketpair(*args):
one, two = __original_socketpair__(*args)
return socket(one), socket(two)
except AttributeError:
pass
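A short sketch of the green socket in action (the hosts are illustrative): blocking-style calls yield to the hub, so a GreenPool can multiplex many connections in one OS thread:

import eventlet
from eventlet.green import socket

def fetch_status(host):
    s = socket.socket()
    s.connect((host, 80))                 # cooperative: yields instead of blocking
    s.sendall(b'HEAD / HTTP/1.0\r\nHost: ' + host.encode() + b'\r\n\r\n')
    line = s.recv(64).split(b'\r\n')[0]
    s.close()
    return line

pool = eventlet.GreenPool()
for line in pool.imap(fetch_status, ['example.com', 'example.org']):
    print(line)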

@@ -0,0 +1,14 @@
import sys
if sys.version_info < (3, 12):
from eventlet import patcher
from eventlet.green import asyncore
from eventlet.green import socket
patcher.inject(
'asynchat',
globals(),
('asyncore', asyncore),
('socket', socket))
del patcher

@@ -0,0 +1,16 @@
import sys
if sys.version_info < (3, 12):
from eventlet import patcher
from eventlet.green import select
from eventlet.green import socket
from eventlet.green import time
patcher.inject(
"asyncore",
globals(),
('select', select),
('socket', socket),
('time', time))
del patcher

@@ -0,0 +1,38 @@
"""
In order to detect a filehandle that's been closed, our only clue may be
the operating system returning the same filehandle in response to some
other operation.
The builtins 'file' and 'open' are patched to collaborate with the
notify_opened protocol.
"""
builtins_orig = __builtins__
from eventlet import hubs
from eventlet.hubs import hub
from eventlet.patcher import slurp_properties
import sys
__all__ = dir(builtins_orig)
__patched__ = ['open']
slurp_properties(builtins_orig, globals(),
ignore=__patched__, srckeys=dir(builtins_orig))
hubs.get_hub()
__original_open = open
__opening = False
def open(*args, **kwargs):
global __opening
result = __original_open(*args, **kwargs)
if not __opening:
# This is incredibly ugly. 'open' is used under the hood by
# the import process. So, ensure we don't wind up in an
# infinite loop.
__opening = True
hubs.notify_opened(result.fileno())
__opening = False
return result
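The effect, sketched (the path is arbitrary): every successful open() reports the new descriptor to the hub, so a listener left behind by a previous owner of the same fd number is cleaned up rather than firing on the wrong file:

from eventlet.green import builtin

f = builtin.open('/etc/hosts')   # arbitrary readable file
# by this point hubs.notify_opened(f.fileno()) has run, clearing any stale
# hub listener registered for this (recycled) descriptor number
data = f.read()
f.close()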

@@ -0,0 +1,13 @@
from eventlet import patcher
# *NOTE: there might be some funny business with the "SOCKS" module
# if it even still exists
from eventlet.green import socket
patcher.inject('ftplib', globals(), ('socket', socket))
del patcher
# Run test program when run as a script
if __name__ == '__main__':
test()

@@ -0,0 +1,189 @@
# This is part of Python source code with Eventlet-specific modifications.
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved" are retained in Python alone or in any derivative version prepared by
# Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
from enum import IntEnum
__all__ = ['HTTPStatus']
class HTTPStatus(IntEnum):
"""HTTP status codes and reason phrases
Status codes from the following RFCs are all observed:
* RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
* RFC 6585: Additional HTTP Status Codes
* RFC 3229: Delta encoding in HTTP
* RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
* RFC 5842: Binding Extensions to WebDAV
* RFC 7238: Permanent Redirect
* RFC 2295: Transparent Content Negotiation in HTTP
* RFC 2774: An HTTP Extension Framework
"""
def __new__(cls, value, phrase, description=''):
obj = int.__new__(cls, value)
obj._value_ = value
obj.phrase = phrase
obj.description = description
return obj
# informational
CONTINUE = 100, 'Continue', 'Request received, please continue'
SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
'Switching to new protocol; obey Upgrade header')
PROCESSING = 102, 'Processing'
# success
OK = 200, 'OK', 'Request fulfilled, document follows'
CREATED = 201, 'Created', 'Document created, URL follows'
ACCEPTED = (202, 'Accepted',
'Request accepted, processing continues off-line')
NON_AUTHORITATIVE_INFORMATION = (203,
'Non-Authoritative Information', 'Request fulfilled from cache')
NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
MULTI_STATUS = 207, 'Multi-Status'
ALREADY_REPORTED = 208, 'Already Reported'
IM_USED = 226, 'IM Used'
# redirection
MULTIPLE_CHOICES = (300, 'Multiple Choices',
'Object has several resources -- see URI list')
MOVED_PERMANENTLY = (301, 'Moved Permanently',
'Object moved permanently -- see URI list')
FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
NOT_MODIFIED = (304, 'Not Modified',
'Document has not changed since given time')
USE_PROXY = (305, 'Use Proxy',
'You must use proxy specified in Location to access this resource')
TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
'Object moved temporarily -- see URI list')
PERMANENT_REDIRECT = (308, 'Permanent Redirect',
'Object moved permanently -- see URI list')
# client error
BAD_REQUEST = (400, 'Bad Request',
'Bad request syntax or unsupported method')
UNAUTHORIZED = (401, 'Unauthorized',
'No permission -- see authorization schemes')
PAYMENT_REQUIRED = (402, 'Payment Required',
'No payment -- see charging schemes')
FORBIDDEN = (403, 'Forbidden',
'Request forbidden -- authorization will not help')
NOT_FOUND = (404, 'Not Found',
'Nothing matches the given URI')
METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
'Specified method is invalid for this resource')
NOT_ACCEPTABLE = (406, 'Not Acceptable',
'URI not available in preferred format')
PROXY_AUTHENTICATION_REQUIRED = (407,
'Proxy Authentication Required',
'You must authenticate with this proxy before proceeding')
REQUEST_TIMEOUT = (408, 'Request Timeout',
'Request timed out; try again later')
CONFLICT = 409, 'Conflict', 'Request conflict'
GONE = (410, 'Gone',
'URI no longer exists and has been permanently removed')
LENGTH_REQUIRED = (411, 'Length Required',
'Client must specify Content-Length')
PRECONDITION_FAILED = (412, 'Precondition Failed',
'Precondition in headers is false')
REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
'Entity is too large')
REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
'URI is too long')
UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
'Entity body in unsupported format')
REQUESTED_RANGE_NOT_SATISFIABLE = (416,
'Requested Range Not Satisfiable',
'Cannot satisfy request range')
EXPECTATION_FAILED = (417, 'Expectation Failed',
'Expect condition could not be satisfied')
UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
LOCKED = 423, 'Locked'
FAILED_DEPENDENCY = 424, 'Failed Dependency'
UPGRADE_REQUIRED = 426, 'Upgrade Required'
PRECONDITION_REQUIRED = (428, 'Precondition Required',
'The origin server requires the request to be conditional')
TOO_MANY_REQUESTS = (429, 'Too Many Requests',
'The user has sent too many requests in '
'a given amount of time ("rate limiting")')
REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
'Request Header Fields Too Large',
'The server is unwilling to process the request because its header '
'fields are too large')
# server errors
INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
'Server got itself in trouble')
NOT_IMPLEMENTED = (501, 'Not Implemented',
'Server does not support this operation')
BAD_GATEWAY = (502, 'Bad Gateway',
'Invalid responses from another server/proxy')
SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
'The server cannot process the request due to a high load')
GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
'The gateway server did not receive a timely response')
HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
'Cannot fulfill request')
VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
LOOP_DETECTED = 508, 'Loop Detected'
NOT_EXTENDED = 510, 'Not Extended'
NETWORK_AUTHENTICATION_REQUIRED = (511,
'Network Authentication Required',
'The client needs to authenticate to gain network access')
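Because each member is an IntEnum carrying a phrase and a description, status codes compare equal to plain integers; for example:

>>> HTTPStatus.NOT_FOUND == 404
True
>>> HTTPStatus(404).phrase
'Not Found'
>>> HTTPStatus.NOT_FOUND.description
'Nothing matches the given URI'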

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,691 @@
# This is part of Python source code with Eventlet-specific modifications.
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved" are retained in Python alone or in any derivative version prepared by
# Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
#
# Import our required modules
#
import re
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
def _warn_deprecated_setter(setter):
import warnings
msg = ('The .%s setter is deprecated. The attribute will be read-only in '
'future releases. Please use the set() method instead.' % setter)
warnings.warn(msg, DeprecationWarning, stacklevel=3)
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a backslash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
# Because of the way browsers really handle cookies (as opposed to what
# the RFC says) we also encode "," and ";".
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_UnescapedChars = _LegalChars + ' ()/<=>?@[]{}'
_Translator = {n: '\\%03o' % n
for n in set(range(256)) - set(map(ord, _UnescapedChars))}
_Translator.update({
ord('"'): '\\"',
ord('\\'): '\\\\',
})
# Eventlet change: match used instead of fullmatch for Python 3.3 compatibility
_is_legal_key = re.compile(r'[%s]+\Z' % re.escape(_LegalChars)).match
def _quote(str):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if str is None or _is_legal_key(str):
return str
else:
return '"' + str.translate(_Translator) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if str is None or len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
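# For example, the two routines round-trip the octal scheme described above:
#     _quote('fudge\n')         ->  '"fudge\\012"'
#     _unquote('"fudge\\012"')  ->  'fudge\n'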
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from eventlet.green.time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
class Morsel(dict):
"""A class to hold ONE (key, value) pair.
In a cookie, each such pair may have several attributes, so this class is
used to keep the attributes associated with the appropriate key,value pair.
This class also includes a coded_value attribute, which is used to hold
the network representation of the value. This is most useful when Python
objects are pickled for network transit.
"""
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = {
"expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "Secure",
"httponly" : "HttpOnly",
"version" : "Version",
}
_flags = {'secure', 'httponly'}
def __init__(self):
# Set defaults
self._key = self._value = self._coded_value = None
# Set default attributes
for key in self._reserved:
dict.__setitem__(self, key, "")
@property
def key(self):
return self._key
@key.setter
def key(self, key):
_warn_deprecated_setter('key')
self._key = key
@property
def value(self):
return self._value
@value.setter
def value(self, value):
_warn_deprecated_setter('value')
self._value = value
@property
def coded_value(self):
return self._coded_value
@coded_value.setter
def coded_value(self, coded_value):
_warn_deprecated_setter('coded_value')
self._coded_value = coded_value
def __setitem__(self, K, V):
K = K.lower()
if not K in self._reserved:
raise CookieError("Invalid attribute %r" % (K,))
dict.__setitem__(self, K, V)
def setdefault(self, key, val=None):
key = key.lower()
if key not in self._reserved:
raise CookieError("Invalid attribute %r" % (key,))
return dict.setdefault(self, key, val)
def __eq__(self, morsel):
if not isinstance(morsel, Morsel):
return NotImplemented
return (dict.__eq__(self, morsel) and
self._value == morsel._value and
self._key == morsel._key and
self._coded_value == morsel._coded_value)
__ne__ = object.__ne__
def copy(self):
morsel = Morsel()
dict.update(morsel, self)
morsel.__dict__.update(self.__dict__)
return morsel
def update(self, values):
data = {}
for key, val in dict(values).items():
key = key.lower()
if key not in self._reserved:
raise CookieError("Invalid attribute %r" % (key,))
data[key] = val
dict.update(self, data)
def isReservedKey(self, K):
return K.lower() in self._reserved
def set(self, key, val, coded_val, LegalChars=_LegalChars):
if LegalChars != _LegalChars:
import warnings
warnings.warn(
'LegalChars parameter is deprecated, ignored and will '
'be removed in future versions.', DeprecationWarning,
stacklevel=2)
if key.lower() in self._reserved:
raise CookieError('Attempt to set a reserved key %r' % (key,))
if not _is_legal_key(key):
raise CookieError('Illegal key %r' % (key,))
# It's a good key, so save it.
self._key = key
self._value = val
self._coded_value = coded_val
def __getstate__(self):
return {
'key': self._key,
'value': self._value,
'coded_value': self._coded_value,
}
def __setstate__(self, state):
self._key = state['key']
self._value = state['value']
self._coded_value = state['coded_value']
def output(self, attrs=None, header="Set-Cookie:"):
return "%s %s" % (header, self.OutputString(attrs))
__str__ = output
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.OutputString())
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % (self.OutputString(attrs).replace('"', r'\"'))
def OutputString(self, attrs=None):
# Build up our result
#
result = []
append = result.append
# First, the key=value pair
append("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = sorted(self.items())
for key, value in items:
if value == "":
continue
if key not in attrs:
continue
if key == "expires" and isinstance(value, int):
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
elif key in self._flags:
if value:
append(str(self._reserved[key]))
else:
append("%s=%s" % (self._reserved[key], value))
# Return the result
return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
_LegalValueChars = _LegalKeyChars + r'\[\]'
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
\s* # Optional whitespace at start of cookie
(?P<key> # Start of group 'key'
[""" + _LegalKeyChars + r"""]+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
[""" + _LegalValueChars + r"""]* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
if isinstance(value, Morsel):
# allow assignment of constructed Morsels (e.g. for pickling)
dict.__setitem__(self, key, value)
else:
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
l.append('%s=%s' % (key, repr(value.value)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
parsed_items = [] # Parsed (type, key, value) triples
morsel_seen = False # A key=value pair was previously encountered
TYPE_ATTRIBUTE = 1
TYPE_KEYVALUE = 2
# We first parse the whole cookie string and reject it if it's
# syntactically invalid (this helps avoid some classes of injection
# attacks).
while 0 <= i < n:
# Start looking for a cookie
match = patt.match(str, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
if key[0] == "$":
if not morsel_seen:
# We ignore attributes which pertain to the cookie
# mechanism as a whole, such as "$Version".
# See RFC 2965. (Does anyone care?)
continue
parsed_items.append((TYPE_ATTRIBUTE, key[1:], value))
elif key.lower() in Morsel._reserved:
if not morsel_seen:
# Invalid cookie string
return
if value is None:
if key.lower() in Morsel._flags:
parsed_items.append((TYPE_ATTRIBUTE, key, True))
else:
# Invalid cookie string
return
else:
parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value)))
elif value is not None:
parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value)))
morsel_seen = True
else:
# Invalid cookie string
return
# The cookie string is valid, apply it.
M = None # current morsel
for tp, key, value in parsed_items:
if tp == TYPE_ATTRIBUTE:
assert M is not None
M[key] = value
else:
assert tp == TYPE_KEYVALUE
rval, cval = value
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)

File diff suppressed because it is too large

@@ -0,0 +1,18 @@
from eventlet import patcher
from eventlet.green import socket
to_patch = [('socket', socket)]
try:
from eventlet.green import ssl
to_patch.append(('ssl', ssl))
except ImportError:
pass
from eventlet.green.http import client
for name in dir(client):
if name not in patcher.__exclude:
globals()[name] = getattr(client, name)
if __name__ == '__main__':
test()

@@ -0,0 +1,133 @@
os_orig = __import__("os")
import errno
socket = __import__("socket")
from stat import S_ISREG
from eventlet import greenio
from eventlet.support import get_errno
from eventlet import greenthread
from eventlet import hubs
from eventlet.patcher import slurp_properties
__all__ = os_orig.__all__
__patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open']
slurp_properties(
os_orig,
globals(),
ignore=__patched__,
srckeys=dir(os_orig))
def fdopen(fd, *args, **kw):
"""fdopen(fd [, mode='r' [, bufsize]]) -> file_object
Return an open file object connected to a file descriptor."""
if not isinstance(fd, int):
raise TypeError('fd should be int, not %r' % fd)
try:
return greenio.GreenPipe(fd, *args, **kw)
except OSError as e:
raise OSError(*e.args)
__original_read__ = os_orig.read
def read(fd, n):
"""read(fd, buffersize) -> string
Read a file descriptor."""
while True:
# don't wait to read for regular files
# select/poll will always return True while epoll will simply crash
st_mode = os_orig.stat(fd).st_mode
if not S_ISREG(st_mode):
try:
hubs.trampoline(fd, read=True)
except hubs.IOClosed:
return ''
try:
return __original_read__(fd, n)
except OSError as e:
if get_errno(e) == errno.EPIPE:
return ''
if get_errno(e) != errno.EAGAIN:
raise
__original_write__ = os_orig.write
def write(fd, st):
"""write(fd, string) -> byteswritten
Write a string to a file descriptor.
"""
while True:
# don't wait to write for regular files
# select/poll will always return True while epoll will simply crash
st_mode = os_orig.stat(fd).st_mode
if not S_ISREG(st_mode):
try:
hubs.trampoline(fd, write=True)
except hubs.IOClosed:
return 0
try:
return __original_write__(fd, st)
except OSError as e:
if get_errno(e) not in [errno.EAGAIN, errno.EPIPE]:
raise
def wait():
"""wait() -> (pid, status)
Wait for completion of a child process."""
return waitpid(0, 0)
__original_waitpid__ = os_orig.waitpid
def waitpid(pid, options):
"""waitpid(...)
waitpid(pid, options) -> (pid, status)
Wait for completion of a given child process."""
if options & os_orig.WNOHANG != 0:
return __original_waitpid__(pid, options)
else:
new_options = options | os_orig.WNOHANG
while True:
rpid, status = __original_waitpid__(pid, new_options)
if rpid and status >= 0:
return rpid, status
greenthread.sleep(0.01)
__original_open__ = os_orig.open
def open(file, flags, mode=0o777, dir_fd=None):
""" Wrap os.open
This behaves identically, but collaborates with
the hub's notify_opened protocol.
"""
# pathlib workaround #534 pathlib._NormalAccessor wraps `open` in
# `staticmethod` for py < 3.7 but not 3.7. That means we get here with
# `file` being a pathlib._NormalAccessor object, and the other arguments
# shifted. Fortunately pathlib doesn't use the `dir_fd` argument, so we
# have space in the parameter list. We use some heuristics to detect this
# and adjust the parameters (without importing pathlib)
if type(file).__name__ == '_NormalAccessor':
file, flags, mode, dir_fd = flags, mode, dir_fd, None
if dir_fd is not None:
fd = __original_open__(file, flags, mode, dir_fd=dir_fd)
else:
fd = __original_open__(file, flags, mode)
hubs.notify_opened(fd)
return fd
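A sketch of the green read() on a pipe (pipes are not regular files, so the trampoline path is taken): the read blocks only its own greenthread while the hub keeps running:

import eventlet
from eventlet.green import os

r, w = os.pipe()

def writer():
    eventlet.sleep(0.1)
    os.write(w, b'hello')

eventlet.spawn(writer)
print(os.read(r, 5))    # yields to the hub until writer() runs, then b'hello'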

@@ -0,0 +1,257 @@
# Copyright (c) 2010, CCP Games
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of CCP Games nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is API-equivalent to the standard library :mod:`profile` module
but it is greenthread-aware as well as thread-aware. Use this module
to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
FIXME: No testcases for this module.
"""
profile_orig = __import__('profile')
__all__ = profile_orig.__all__
from eventlet.patcher import slurp_properties
slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))
import sys
import functools
from eventlet import greenthread
from eventlet import patcher
import _thread
thread = patcher.original(_thread.__name__) # non-monkeypatched module needed
# This class provides the start() and stop() functions
class Profile(profile_orig.Profile):
base = profile_orig.Profile
def __init__(self, timer=None, bias=None):
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.base.__init__(self, timer, bias)
self.sleeping = {}
def __call__(self, *args):
"""make callable, allowing an instance to be the profiler"""
self.dispatcher(*args)
def _setup(self):
self._has_setup = True
self.cur = None
self.timings = {}
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.simulate_call("profiler")
def start(self, name="start"):
if getattr(self, "running", False):
return
self._setup()
self.simulate_call("start")
self.running = True
sys.setprofile(self.dispatcher)
def stop(self):
sys.setprofile(None)
self.running = False
self.TallyTimings()
# special cases for the original run commands, making sure to
# clear the timer context.
def runctx(self, cmd, globals, locals):
if not getattr(self, "_has_setup", False):
self._setup()
try:
return profile_orig.Profile.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def runcall(self, func, *args, **kw):
if not getattr(self, "_has_setup", False):
self._setup()
try:
return profile_orig.Profile.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
def trace_dispatch_return_extend_back(self, frame, t):
"""A hack function to override error checking in parent class. It
allows invalid returns (where frames weren't previously entered into
the profiler) which can happen for all the tasklets that suddenly start
to get monitored. This means that the time will eventually be attributed
to a call high in the chain, when there is a tasklet switch
"""
if isinstance(self.cur[-2], Profile.fake_frame):
return False
self.trace_dispatch_call(frame, 0)
return self.trace_dispatch_return(frame, t)
def trace_dispatch_c_return_extend_back(self, frame, t):
# same for c return
if isinstance(self.cur[-2], Profile.fake_frame):
return False # ignore bogus returns
self.trace_dispatch_c_call(frame, 0)
return self.trace_dispatch_return(frame, t)
def SwitchTasklet(self, t0, t1, t):
# tally the time spent in the old tasklet
pt, it, et, fn, frame, rcur = self.cur
cur = (pt, it + t, et, fn, frame, rcur)
# we are switching to a new tasklet, store the old
self.sleeping[t0] = cur, self.timings
self.current_tasklet = t1
# find the new one
try:
self.cur, self.timings = self.sleeping.pop(t1)
except KeyError:
self.cur, self.timings = None, {}
self.simulate_call("profiler")
self.simulate_call("new_tasklet")
def TallyTimings(self):
oldtimings = self.sleeping
self.sleeping = {}
# first, unwind the main "cur"
self.cur = self.Unwind(self.cur, self.timings)
# we must keep the timings dicts separate for each tasklet, since each
# contains the 'ns' item, the recursion count of each function in that
# tasklet. This is used in the Unwind method below.
for tasklet, (cur, timings) in oldtimings.items():
self.Unwind(cur, timings)
for k, v in timings.items():
if k not in self.timings:
self.timings[k] = v
else:
# accumulate all to the self.timings
cc, ns, tt, ct, callers = self.timings[k]
# ns should be 0 after unwinding
cc += v[0]
tt += v[2]
ct += v[3]
for k1, v1 in v[4].items():
callers[k1] = callers.get(k1, 0) + v1
self.timings[k] = cc, ns, tt, ct, callers
def Unwind(self, cur, timings):
"A function to unwind a 'cur' frame and tally the results"
"see profile.trace_dispatch_return() for details"
# also see simulate_cmd_complete()
while cur[-1]:
rpt, rit, ret, rfn, frame, rcur = cur
frame_total = rit + ret
if rfn in timings:
cc, ns, tt, ct, callers = timings[rfn]
else:
cc, ns, tt, ct, callers = 0, 0, 0, 0, {}
if not ns:
ct = ct + frame_total
cc = cc + 1
if rcur:
ppt, pit, pet, pfn, pframe, pcur = rcur
else:
pfn = None
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
elif pfn:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
ppt, pit, pet, pfn, pframe, pcur = rcur
rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
cur = rcur
return cur
def ContextWrap(f):
@functools.wraps(f)
def ContextWrapper(self, arg, t):
current = greenthread.getcurrent()
if current != self.current_tasklet:
self.SwitchTasklet(self.current_tasklet, current, t)
t = 0.0 # the time was billed to the previous tasklet
return f(self, arg, t)
return ContextWrapper
# Add "return safety" to the dispatchers
Profile.dispatch = dict(profile_orig.Profile.dispatch, **{
'return': Profile.trace_dispatch_return_extend_back,
'c_return': Profile.trace_dispatch_c_return_extend_back,
})
# Add automatic tasklet detection to the callbacks.
Profile.dispatch = {k: ContextWrap(v) for k, v in Profile.dispatch.items()}
# run statements shamelessly stolen from profile.py
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
prof = Profile()
try:
prof = prof.run(statement)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats(sort)
def runctx(statement, globals, locals, filename=None):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
prof = Profile()
try:
prof = prof.runctx(statement, globals, locals)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats()
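Typical use mirrors the stdlib profiler, plus the start()/stop() pair added above (the workload function below is hypothetical):

from eventlet.green import profile

p = profile.Profile()
p.start()
run_green_workload()     # hypothetical function that spawns greenthreads
p.stop()                 # tallies timings across all tasklets seen
p.print_stats()          # inherited from the stdlib profile.Profile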

@@ -0,0 +1,86 @@
import eventlet
from eventlet.hubs import get_hub
__select = eventlet.patcher.original('select')
error = __select.error
__patched__ = ['select']
__deleted__ = ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']
def get_fileno(obj):
# The purpose of this function is to exactly replicate
# the behavior of the select module when confronted with
# abnormal filenos; the details are extensively tested in
# the stdlib test/test_select.py.
try:
f = obj.fileno
except AttributeError:
if not isinstance(obj, int):
raise TypeError("Expected int or long, got %s" % type(obj))
return obj
else:
rv = f()
if not isinstance(rv, int):
raise TypeError("Expected int or long, got %s" % type(rv))
return rv
def select(read_list, write_list, error_list, timeout=None):
# error checking like this is required by the stdlib unit tests
if timeout is not None:
try:
timeout = float(timeout)
except ValueError:
raise TypeError("Expected number for timeout")
hub = get_hub()
timers = []
current = eventlet.getcurrent()
if hub.greenlet is current:
raise RuntimeError('do not call blocking functions from the mainloop')
ds = {}
for r in read_list:
ds[get_fileno(r)] = {'read': r}
for w in write_list:
ds.setdefault(get_fileno(w), {})['write'] = w
for e in error_list:
ds.setdefault(get_fileno(e), {})['error'] = e
listeners = []
def on_read(d):
original = ds[get_fileno(d)]['read']
current.switch(([original], [], []))
def on_write(d):
original = ds[get_fileno(d)]['write']
current.switch(([], [original], []))
def on_timeout2():
current.switch(([], [], []))
def on_timeout():
# ensure that BaseHub.run() has a chance to call self.wait()
# at least once before timed out. otherwise the following code
# can time out erroneously.
#
# s1, s2 = socket.socketpair()
# print(select.select([], [s1], [], 0))
timers.append(hub.schedule_call_global(0, on_timeout2))
if timeout is not None:
timers.append(hub.schedule_call_global(timeout, on_timeout))
try:
for k, v in ds.items():
if v.get('read'):
listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None))
if v.get('write'):
listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, lambda: None))
try:
return hub.switch()
finally:
for l in listeners:
hub.remove(l)
finally:
for t in timers:
t.cancel()
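Minimal demonstration (uses a green socketpair): select() returns as soon as the hub reports readiness, blocking only the calling greenthread:

from eventlet.green import select, socket

s1, s2 = socket.socketpair()
s2.send(b'x')
readable, writable, errored = select.select([s1], [], [], 1.0)
assert readable == [s1]      # data is already waiting, so this returns at once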

@@ -0,0 +1,34 @@
import sys
from eventlet import patcher
from eventlet.green import select
__patched__ = [
'DefaultSelector',
'SelectSelector',
]
# We only have green select so the options are:
# * leave it be and have selectors that block
# * try to pretend the "bad" selectors don't exist
# * replace all with SelectSelector at the price of possibly different
# performance characteristics and a missing fileno() method (if someone
# uses it, it'll result in a crash; we may want to implement it in the future)
#
# This module used to follow the third approach, but simply removing the
# offending selectors is the less error-prone and less confusing approach.
__deleted__ = [
'PollSelector',
'EpollSelector',
'DevpollSelector',
'KqueueSelector',
]
patcher.inject('selectors', globals(), ('select', select))
del patcher
if sys.platform != 'win32':
SelectSelector._select = staticmethod(select.select)
DefaultSelector = SelectSelector
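With the problematic selectors deleted, user code can only ever get the green SelectSelector; a small sketch:

from eventlet.green import selectors, socket

sel = selectors.DefaultSelector()        # always the (green) SelectSelector
a, b = socket.socketpair()
sel.register(a, selectors.EVENT_READ)
b.send(b'x')
for key, events in sel.select(timeout=1.0):
    assert key.fileobj is a and events & selectors.EVENT_READ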

@@ -0,0 +1,63 @@
import os
import sys
__import__('eventlet.green._socket_nodns')
__socket = sys.modules['eventlet.green._socket_nodns']
__all__ = __socket.__all__
__patched__ = __socket.__patched__ + [
'create_connection',
'getaddrinfo',
'gethostbyname',
'gethostbyname_ex',
'getnameinfo',
]
from eventlet.patcher import slurp_properties
slurp_properties(__socket, globals(), srckeys=dir(__socket))
if os.environ.get("EVENTLET_NO_GREENDNS", '').lower() != 'yes':
from eventlet.support import greendns
gethostbyname = greendns.gethostbyname
getaddrinfo = greendns.getaddrinfo
gethostbyname_ex = greendns.gethostbyname_ex
getnameinfo = greendns.getnameinfo
del greendns
def create_connection(address,
timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
err = "getaddrinfo returns an empty list"
host, port = address
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as e:
err = e
if sock is not None:
sock.close()
if not isinstance(err, error):
err = error(err)
raise err
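Usage matches the stdlib helper; note that with greendns enabled (the default), the getaddrinfo() call above also yields cooperatively. The host is illustrative:

from eventlet.green import socket

sock = socket.create_connection(('example.com', 80), timeout=5)
sock.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
print(sock.recv(128))
sock.close()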

@@ -0,0 +1,487 @@
__ssl = __import__('ssl')
from eventlet.patcher import slurp_properties
slurp_properties(__ssl, globals(), srckeys=dir(__ssl))
import sys
from eventlet import greenio, hubs
from eventlet.greenio import (
GreenSocket, CONNECT_ERR, CONNECT_SUCCESS,
)
from eventlet.hubs import trampoline, IOClosed
from eventlet.support import get_errno, PY33
from contextlib import contextmanager
orig_socket = __import__('socket')
socket = orig_socket.socket
timeout_exc = orig_socket.timeout
__patched__ = [
'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple',
'create_default_context', '_create_default_https_context']
_original_sslsocket = __ssl.SSLSocket
_original_sslcontext = __ssl.SSLContext
_is_py_3_7 = sys.version_info[:2] == (3, 7)
_original_wrap_socket = __ssl.SSLContext.wrap_socket
@contextmanager
def _original_ssl_context(*args, **kwargs):
tmp_sslcontext = _original_wrap_socket.__globals__.get('SSLContext', None)
tmp_sslsocket = _original_sslsocket._create.__globals__.get('SSLSocket', None)
_original_sslsocket._create.__globals__['SSLSocket'] = _original_sslsocket
_original_wrap_socket.__globals__['SSLContext'] = _original_sslcontext
try:
yield
finally:
_original_wrap_socket.__globals__['SSLContext'] = tmp_sslcontext
_original_sslsocket._create.__globals__['SSLSocket'] = tmp_sslsocket
class GreenSSLSocket(_original_sslsocket):
""" This is a green version of the SSLSocket class from the ssl module added
in 2.6. For documentation on it, please see the Python standard
documentation.
Python nonblocking ssl objects don't give errors when the other end
of the socket is closed (they do notice when the other end is shutdown,
though). Any write/read operations will simply hang if the socket is
closed from the other end. There is no obvious fix for this problem;
it appears to be a limitation of Python's ssl object implementation.
A workaround is to set a reasonable timeout on the socket using
settimeout(), and to close/reopen the connection when a timeout
occurs at an unexpected juncture in the code.
"""
def __new__(cls, sock=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_TLS, ca_certs=None,
do_handshake_on_connect=True, *args, **kw):
if not isinstance(sock, GreenSocket):
sock = GreenSocket(sock)
with _original_ssl_context():
context = kw.get('_context')
if context:
ret = _original_sslsocket._create(
sock=sock.fd,
server_side=server_side,
do_handshake_on_connect=False,
suppress_ragged_eofs=kw.get('suppress_ragged_eofs', True),
server_hostname=kw.get('server_hostname'),
context=context,
session=kw.get('session'),
)
else:
ret = cls._wrap_socket(
sock=sock.fd,
keyfile=keyfile,
certfile=certfile,
server_side=server_side,
cert_reqs=cert_reqs,
ssl_version=ssl_version,
ca_certs=ca_certs,
do_handshake_on_connect=False,
ciphers=kw.get('ciphers'),
)
ret.keyfile = keyfile
ret.certfile = certfile
ret.cert_reqs = cert_reqs
ret.ssl_version = ssl_version
ret.ca_certs = ca_certs
ret.__class__ = GreenSSLSocket
return ret
@staticmethod
def _wrap_socket(sock, keyfile, certfile, server_side, cert_reqs,
ssl_version, ca_certs, do_handshake_on_connect, ciphers):
context = _original_sslcontext(protocol=ssl_version)
        context.verify_mode = cert_reqs
if certfile or keyfile:
context.load_cert_chain(
certfile=certfile,
keyfile=keyfile,
)
if ca_certs:
context.load_verify_locations(ca_certs)
if ciphers:
context.set_ciphers(ciphers)
return context.wrap_socket(
sock=sock,
server_side=server_side,
do_handshake_on_connect=do_handshake_on_connect,
)
# we are inheriting from SSLSocket because its constructor calls
# do_handshake whose behavior we wish to override
def __init__(self, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_TLS, ca_certs=None,
do_handshake_on_connect=True, *args, **kw):
if not isinstance(sock, GreenSocket):
sock = GreenSocket(sock)
self.act_non_blocking = sock.act_non_blocking
# the superclass initializer trashes the methods so we remove
# the local-object versions of them and let the actual class
# methods shine through
        # Note: this is for Python 2
try:
for fn in orig_socket._delegate_methods:
delattr(self, fn)
except AttributeError:
pass
# Python 3 SSLSocket construction process overwrites the timeout so restore it
self._timeout = sock.gettimeout()
# it also sets timeout to None internally apparently (tested with 3.4.2)
_original_sslsocket.settimeout(self, 0.0)
assert _original_sslsocket.gettimeout(self) == 0.0
# see note above about handshaking
self.do_handshake_on_connect = do_handshake_on_connect
if do_handshake_on_connect and self._connected:
self.do_handshake()
def settimeout(self, timeout):
self._timeout = timeout
def gettimeout(self):
return self._timeout
def setblocking(self, flag):
if flag:
self.act_non_blocking = False
self._timeout = None
else:
self.act_non_blocking = True
self._timeout = 0.0
def _call_trampolining(self, func, *a, **kw):
if self.act_non_blocking:
return func(*a, **kw)
else:
while True:
try:
return func(*a, **kw)
except SSLError as exc:
if get_errno(exc) == SSL_ERROR_WANT_READ:
trampoline(self,
read=True,
timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
elif get_errno(exc) == SSL_ERROR_WANT_WRITE:
trampoline(self,
write=True,
timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
elif _is_py_3_7 and "unexpected eof" in exc.args[1]:
                        # On Python 3.7, for reasons not fully understood, we
                        # sometimes get "[SSL: KRB5_S_TKT_NYV] unexpected eof
                        # while reading" errors; treat them as a closed socket.
raise IOClosed
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._call_trampolining(
super().write, data)
def read(self, len=1024, buffer=None):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
try:
return self._call_trampolining(
super().read, len, buffer)
except IOClosed:
if buffer is None:
return b''
else:
return 0
def send(self, data, flags=0):
if self._sslobj:
return self._call_trampolining(
super().send, data, flags)
else:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
return socket.send(self, data, flags)
def sendto(self, data, addr, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
else:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
return socket.sendto(self, data, addr, flags)
def sendall(self, data, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
data_to_send = data
while (count < amount):
v = self.send(data_to_send)
count += v
if v == 0:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
else:
data_to_send = data[count:]
return amount
else:
while True:
try:
return socket.sendall(self, data, flags)
except orig_socket.error as e:
if self.act_non_blocking:
raise
erno = get_errno(e)
if erno in greenio.SOCKET_BLOCKING:
trampoline(self, write=True,
timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
elif erno in greenio.SOCKET_CLOSED:
                        return b''
raise
def recv(self, buflen=1024, flags=0):
return self._base_recv(buflen, flags, into=False)
def recv_into(self, buffer, nbytes=None, flags=0):
# Copied verbatim from CPython
if buffer and nbytes is None:
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
# end of CPython code
return self._base_recv(nbytes, flags, into=True, buffer_=buffer)
def _base_recv(self, nbytes, flags, into, buffer_=None):
if into:
plain_socket_function = socket.recv_into
else:
plain_socket_function = socket.recv
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to %s() on %s" %
                    (plain_socket_function.__name__, self.__class__))
if into:
read = self.read(nbytes, buffer_)
else:
read = self.read(nbytes)
return read
else:
while True:
try:
args = [self, nbytes, flags]
if into:
args.insert(1, buffer_)
return plain_socket_function(*args)
except orig_socket.error as e:
if self.act_non_blocking:
raise
erno = get_errno(e)
if erno in greenio.SOCKET_BLOCKING:
try:
trampoline(
self, read=True,
timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
except IOClosed:
return b''
elif erno in greenio.SOCKET_CLOSED:
return b''
raise
    def recvfrom(self, buflen=1024, flags=0):
        if not self.act_non_blocking:
            trampoline(self, read=True, timeout=self.gettimeout(),
                       timeout_exc=timeout_exc('timed out'))
        return super().recvfrom(buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
if not self.act_non_blocking:
trampoline(self, read=True, timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
return super().recvfrom_into(buffer, nbytes, flags)
def unwrap(self):
return GreenSocket(self._call_trampolining(
super().unwrap))
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
return self._call_trampolining(
super().do_handshake)
def _socket_connect(self, addr):
real_connect = socket.connect
if self.act_non_blocking:
return real_connect(self, addr)
else:
clock = hubs.get_hub().clock
# *NOTE: gross, copied code from greenio because it's not factored
# well enough to reuse
if self.gettimeout() is None:
while True:
try:
return real_connect(self, addr)
except orig_socket.error as exc:
if get_errno(exc) in CONNECT_ERR:
trampoline(self, write=True)
elif get_errno(exc) in CONNECT_SUCCESS:
return
else:
raise
else:
end = clock() + self.gettimeout()
while True:
try:
real_connect(self, addr)
except orig_socket.error as exc:
if get_errno(exc) in CONNECT_ERR:
trampoline(
self, write=True,
timeout=end - clock(), timeout_exc=timeout_exc('timed out'))
elif get_errno(exc) in CONNECT_SUCCESS:
return
else:
raise
if clock() >= end:
raise timeout_exc('timed out')
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
# *NOTE: grrrrr copied this code from ssl.py because of the reference
# to socket.connect which we don't want to call directly
if self._sslobj:
raise ValueError("attempt to connect already-connected SSLSocket!")
self._socket_connect(addr)
server_side = False
try:
sslwrap = _ssl.sslwrap
except AttributeError:
            # sslwrap was removed in Python 3.x and, later, in 2.7.9
context = self.context if PY33 else self._context
sslobj = context._wrap_socket(self, server_side, server_hostname=self.server_hostname)
else:
sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.ca_certs, *self.ciphers)
        # SSLObject was added in Python 3.5 (http://bugs.python.org/issue21965),
        # but either way the wrapped object is stored the same way here.
        self._sslobj = sslobj
if self.do_handshake_on_connect:
self.do_handshake()
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
# RDW grr duplication of code from greenio
if self.act_non_blocking:
newsock, addr = socket.accept(self)
else:
while True:
try:
newsock, addr = socket.accept(self)
break
except orig_socket.error as e:
if get_errno(e) not in greenio.SOCKET_BLOCKING:
raise
trampoline(self, read=True, timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
new_ssl = type(self)(
newsock,
server_side=True,
do_handshake_on_connect=False,
suppress_ragged_eofs=self.suppress_ragged_eofs,
_context=self._context,
)
return (new_ssl, addr)
def dup(self):
raise NotImplementedError("Can't dup an ssl object")
SSLSocket = GreenSSLSocket
def wrap_socket(sock, *a, **kw):
return GreenSSLSocket(sock, *a, **kw)
class GreenSSLContext(_original_sslcontext):
__slots__ = ()
def wrap_socket(self, sock, *a, **kw):
return GreenSSLSocket(sock, *a, _context=self, **kw)
# https://github.com/eventlet/eventlet/issues/371
# Thanks to Gevent developers for sharing patch to this problem.
if hasattr(_original_sslcontext.options, 'setter'):
# In 3.6, these became properties. They want to access the
# property __set__ method in the superclass, and they do so by using
# super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
# patch, which causes infinite recursion.
# https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
@_original_sslcontext.options.setter
def options(self, value):
super(_original_sslcontext, _original_sslcontext).options.__set__(self, value)
@_original_sslcontext.verify_flags.setter
def verify_flags(self, value):
super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value)
@_original_sslcontext.verify_mode.setter
def verify_mode(self, value):
super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value)
if hasattr(_original_sslcontext, "maximum_version"):
@_original_sslcontext.maximum_version.setter
def maximum_version(self, value):
super(_original_sslcontext, _original_sslcontext).maximum_version.__set__(self, value)
if hasattr(_original_sslcontext, "minimum_version"):
@_original_sslcontext.minimum_version.setter
def minimum_version(self, value):
super(_original_sslcontext, _original_sslcontext).minimum_version.__set__(self, value)
SSLContext = GreenSSLContext
# TODO: ssl.create_default_context() was added in 2.7.9.
# Not clear we're still trying to support Python versions even older than that.
if hasattr(__ssl, 'create_default_context'):
_original_create_default_context = __ssl.create_default_context
def green_create_default_context(*a, **kw):
        # We can't just monkey-patch the green version of `wrap_socket`
        # onto SSLContext instances, and ssl.create_default_context() does
        # a bunch of setup work.  Rather than re-implementing it all, just
        # switch out the __class__ to get our `wrap_socket` implementation
context = _original_create_default_context(*a, **kw)
context.__class__ = GreenSSLContext
return context
create_default_context = green_create_default_context
_create_default_https_context = green_create_default_context
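
# A minimal sketch of the timeout workaround described in the GreenSSLSocket
# docstring: set a timeout so reads against a peer that silently vanished
# raise an exception instead of hanging forever.  The host name is an
# illustrative assumption; any TLS server would do.
if __name__ == '__main__':
    from eventlet.green import socket as green_socket
    ctx = create_default_context()
    raw = green_socket.create_connection(('example.com', 443))
    tls = ctx.wrap_socket(raw, server_hostname='example.com')
    tls.settimeout(10.0)  # guard every read/write against a dead peer
    tls.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
    try:
        print(tls.read(1024))
    except orig_socket.timeout:
        # per the class docstring: on an unexpected timeout, close (and
        # reopen) the connection rather than keep waiting
        tls.close()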

View File

@ -0,0 +1,137 @@
import errno
import sys
from types import FunctionType
import eventlet
from eventlet import greenio
from eventlet import patcher
from eventlet.green import select, threading, time
__patched__ = ['call', 'check_call', 'Popen']
to_patch = [('select', select), ('threading', threading), ('time', time)]
from eventlet.green import selectors
to_patch.append(('selectors', selectors))
patcher.inject('subprocess', globals(), *to_patch)
subprocess_orig = patcher.original("subprocess")
subprocess_imported = sys.modules.get('subprocess', subprocess_orig)
mswindows = sys.platform == "win32"
if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
# Backported from Python 3.3.
# https://bitbucket.org/eventlet/eventlet/issue/89
class TimeoutExpired(Exception):
"""This exception is raised when the timeout expires while waiting for
a child process.
"""
def __init__(self, cmd, timeout, output=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
def __str__(self):
return ("Command '%s' timed out after %s seconds" %
(self.cmd, self.timeout))
else:
TimeoutExpired = subprocess_imported.TimeoutExpired
# This is the meat of this module, the green version of Popen.
class Popen(subprocess_orig.Popen):
"""eventlet-friendly version of subprocess.Popen"""
# We do not believe that Windows pipes support non-blocking I/O. At least,
# the Python file objects stored on our base-class object have no
# setblocking() method, and the Python fcntl module doesn't exist on
# Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of
# this __init__() override is to wrap the pipes for eventlet-friendly
# non-blocking I/O, don't even bother overriding it on Windows.
if not mswindows:
def __init__(self, args, bufsize=0, *argss, **kwds):
self.args = args
# Forward the call to base-class constructor
subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
# Now wrap the pipes, if any. This logic is loosely borrowed from
# eventlet.processes.Process.run() method.
for attr in "stdin", "stdout", "stderr":
pipe = getattr(self, attr)
if pipe is not None and type(pipe) != greenio.GreenPipe:
# https://github.com/eventlet/eventlet/issues/243
# AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
mode = getattr(pipe, 'mode', '')
if not mode:
if pipe.readable():
mode += 'r'
if pipe.writable():
mode += 'w'
# ValueError: can't have unbuffered text I/O
if bufsize == 0:
bufsize = -1
wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
setattr(self, attr, wrapped_pipe)
__init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
def wait(self, timeout=None, check_interval=0.01):
# Instead of a blocking OS call, this version of wait() uses logic
# borrowed from the eventlet 0.2 processes.Process.wait() method.
if timeout is not None:
endtime = time.time() + timeout
try:
while True:
status = self.poll()
if status is not None:
return status
if timeout is not None and time.time() > endtime:
raise TimeoutExpired(self.args, timeout)
eventlet.sleep(check_interval)
except OSError as e:
if e.errno == errno.ECHILD:
# no child process, this happens if the child process
# already died and has been cleaned up
return -1
else:
raise
wait.__doc__ = subprocess_orig.Popen.wait.__doc__
if not mswindows:
# don't want to rewrite the original _communicate() method, we
# just want a version that uses eventlet.green.select.select()
# instead of select.select().
_communicate = FunctionType(
subprocess_orig.Popen._communicate.__code__,
globals())
try:
_communicate_with_select = FunctionType(
subprocess_orig.Popen._communicate_with_select.__code__,
globals())
_communicate_with_poll = FunctionType(
subprocess_orig.Popen._communicate_with_poll.__code__,
globals())
except AttributeError:
pass
# Borrow subprocess.call() and check_call(), but patch them so they reference
# OUR Popen class rather than subprocess.Popen.
def patched_function(function):
new_function = FunctionType(function.__code__, globals())
new_function.__kwdefaults__ = function.__kwdefaults__
new_function.__defaults__ = function.__defaults__
return new_function
call = patched_function(subprocess_orig.call)
check_call = patched_function(subprocess_orig.check_call)
# check_output is Python 2.7+
if hasattr(subprocess_orig, 'check_output'):
__patched__.append('check_output')
check_output = patched_function(subprocess_orig.check_output)
del patched_function
# Keep exceptions identity.
# https://github.com/eventlet/eventlet/issues/413
CalledProcessError = subprocess_imported.CalledProcessError
del subprocess_imported
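
# A quick sketch of the green Popen: wait() polls cooperatively instead of
# blocking the OS thread, so other greenthreads run while the child executes.
# The command assumes a POSIX 'sleep' binary is available.
if __name__ == '__main__':
    p = Popen(['sleep', '2'])
    try:
        p.wait(timeout=0.5)
    except TimeoutExpired:
        print('child still running, as expected')
    print('exit status:', p.wait())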

View File

@ -0,0 +1,176 @@
"""Implements the standard thread module, using greenthreads."""
import _thread as __thread
from eventlet.support import greenlets as greenlet
from eventlet import greenthread
from eventlet.timeout import with_timeout
from eventlet.lock import Lock
import sys
__patched__ = ['Lock', 'LockType', '_ThreadHandle', '_count',
'_get_main_thread_ident', '_local', '_make_thread_handle',
'allocate', 'allocate_lock', 'exit', 'get_ident',
'interrupt_main', 'stack_size', 'start_joinable_thread',
'start_new', 'start_new_thread']
error = __thread.error
LockType = Lock
__threadcount = 0
if hasattr(__thread, "_is_main_interpreter"):
_is_main_interpreter = __thread._is_main_interpreter
def _set_sentinel():
    # TODO this is dummy code; reimplementing it may be needed:
# https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203
return allocate_lock()
TIMEOUT_MAX = __thread.TIMEOUT_MAX
def _count():
return __threadcount
def get_ident(gr=None):
if gr is None:
return id(greenlet.getcurrent())
else:
return id(gr)
def __thread_body(func, args, kwargs):
global __threadcount
__threadcount += 1
try:
func(*args, **kwargs)
finally:
__threadcount -= 1
class _ThreadHandle:
def __init__(self, greenthread=None):
self._greenthread = greenthread
self._done = False
def _set_done(self):
self._done = True
def is_done(self):
return self._done
@property
def ident(self):
return get_ident(self._greenthread)
def join(self, timeout=None):
if not hasattr(self._greenthread, "wait"):
return
if timeout is not None:
return with_timeout(timeout, self._greenthread.wait)
return self._greenthread.wait()
def _make_thread_handle(ident):
greenthread = greenlet.getcurrent()
assert ident == get_ident(greenthread)
return _ThreadHandle(greenthread=greenthread)
def __spawn_green(function, args=(), kwargs=None, joinable=False):
if ((3, 4) <= sys.version_info < (3, 13)
and getattr(function, '__module__', '') == 'threading'
and hasattr(function, '__self__')):
# In Python 3.4-3.12, threading.Thread uses an internal lock
# automatically released when the python thread state is deleted.
# With monkey patching, eventlet uses green threads without python
# thread state, so the lock is not automatically released.
#
# Wrap _bootstrap_inner() to release explicitly the thread state lock
# when the thread completes.
thread = function.__self__
bootstrap_inner = thread._bootstrap_inner
def wrap_bootstrap_inner():
try:
bootstrap_inner()
finally:
# The lock can be cleared (ex: by a fork())
if getattr(thread, "_tstate_lock", None) is not None:
thread._tstate_lock.release()
thread._bootstrap_inner = wrap_bootstrap_inner
kwargs = kwargs or {}
spawn_func = greenthread.spawn if joinable else greenthread.spawn_n
return spawn_func(__thread_body, function, args, kwargs)
def start_joinable_thread(function, handle=None, daemon=True):
g = __spawn_green(function, joinable=True)
if handle is None:
handle = _ThreadHandle(greenthread=g)
else:
handle._greenthread = g
return handle
def start_new_thread(function, args=(), kwargs=None):
g = __spawn_green(function, args=args, kwargs=kwargs)
return get_ident(g)
start_new = start_new_thread
def _get_main_thread_ident():
greenthread = greenlet.getcurrent()
while greenthread.parent is not None:
greenthread = greenthread.parent
return get_ident(greenthread)
def allocate_lock(*a):
return LockType(1)
allocate = allocate_lock
def exit():
raise greenlet.GreenletExit
exit_thread = __thread.exit_thread
def interrupt_main():
curr = greenlet.getcurrent()
if curr.parent and not curr.parent.dead:
curr.parent.throw(KeyboardInterrupt())
else:
raise KeyboardInterrupt()
if hasattr(__thread, 'stack_size'):
__original_stack_size__ = __thread.stack_size
def stack_size(size=None):
if size is None:
return __original_stack_size__()
if size > __original_stack_size__():
return __original_stack_size__(size)
        else:
            # not going to decrease stack_size, because otherwise other
            # greenlets in this thread will suffer
            pass
from eventlet.corolocal import local as _local
if hasattr(__thread, 'daemon_threads_allowed'):
daemon_threads_allowed = __thread.daemon_threads_allowed
if hasattr(__thread, '_shutdown'):
_shutdown = __thread._shutdown
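
# A small sketch: start_new_thread() actually spawns a greenthread, and
# get_ident() returns the id of the current greenlet, so each "thread"
# sees a distinct ident while sharing a single OS thread.
if __name__ == '__main__':
    import eventlet

    def worker(label):
        print(label, 'ident:', get_ident())

    start_new_thread(worker, ('a',))
    start_new_thread(worker, ('b',))
    eventlet.sleep(0)  # yield so the spawned greenthreads get to run
    print('main ident:', get_ident())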

View File

@ -0,0 +1,132 @@
"""Implements the standard threading module, using greenthreads."""
import eventlet
from eventlet.green import thread
from eventlet.green import time
from eventlet.support import greenlets as greenlet
__patched__ = ['Lock', '_after_fork', '_allocate_lock', '_get_main_thread_ident',
'_make_thread_handle', '_shutdown', '_sleep',
'_start_joinable_thread', '_start_new_thread', '_ThreadHandle',
'currentThread', 'current_thread', 'local', 'stack_size']
__patched__ += ['get_ident', '_set_sentinel']
__orig_threading = eventlet.patcher.original('threading')
__threadlocal = __orig_threading.local()
__patched_enumerate = None
eventlet.patcher.inject(
'threading',
globals(),
('_thread', thread),
('time', time))
_count = 1
class _GreenThread:
"""Wrapper for GreenThread objects to provide Thread-like attributes
and methods"""
def __init__(self, g):
global _count
self._g = g
self._name = 'GreenThread-%d' % _count
_count += 1
def __repr__(self):
return '<_GreenThread(%s, %r)>' % (self._name, self._g)
    def join(self, timeout=None):
        # timeout is accepted for API compatibility but not honored;
        # wait() blocks until the greenthread finishes
        return self._g.wait()
def getName(self):
return self._name
get_name = getName
def setName(self, name):
self._name = str(name)
set_name = setName
name = property(getName, setName)
ident = property(lambda self: id(self._g))
def isAlive(self):
return True
is_alive = isAlive
daemon = property(lambda self: True)
def isDaemon(self):
return self.daemon
is_daemon = isDaemon
__threading = None
def _fixup_thread(t):
# Some third-party packages (lockfile) will try to patch the
# threading.Thread class with a get_name attribute if it doesn't
# exist. Since we might return Thread objects from the original
# threading package that won't get patched, let's make sure each
    # individual object gets patched too, once our patched threading.Thread
# class has been patched. This is why monkey patching can be bad...
global __threading
if not __threading:
__threading = __import__('threading')
if (hasattr(__threading.Thread, 'get_name') and
not hasattr(t, 'get_name')):
t.get_name = t.getName
return t
def current_thread():
global __patched_enumerate
g = greenlet.getcurrent()
if not g:
# Not currently in a greenthread, fall back to standard function
return _fixup_thread(__orig_threading.current_thread())
try:
active = __threadlocal.active
except AttributeError:
active = __threadlocal.active = {}
g_id = id(g)
t = active.get(g_id)
if t is not None:
return t
# FIXME: move import from function body to top
# (jaketesler@github) Furthermore, I was unable to have the current_thread() return correct results from
# threading.enumerate() unless the enumerate() function was a) imported at runtime using the gross __import__() call
# and b) was hot-patched using patch_function().
# https://github.com/eventlet/eventlet/issues/172#issuecomment-379421165
if __patched_enumerate is None:
__patched_enumerate = eventlet.patcher.patch_function(__import__('threading').enumerate)
found = [th for th in __patched_enumerate() if th.ident == g_id]
if found:
return found[0]
# Add green thread to active if we can clean it up on exit
def cleanup(g):
del active[g_id]
try:
g.link(cleanup)
except AttributeError:
# Not a GreenThread type, so there's no way to hook into
# the green thread exiting. Fall back to the standard
# function then.
t = _fixup_thread(__orig_threading.current_thread())
else:
t = active[g_id] = _GreenThread(g)
return t
currentThread = current_thread
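
# A short sketch: inside a spawned greenthread, current_thread() returns a
# _GreenThread wrapper exposing Thread-like name/ident attributes, while the
# main greenlet falls back to the real MainThread object.
if __name__ == '__main__':
    import eventlet

    def show():
        t = current_thread()
        print(t.name, t.ident)

    eventlet.spawn(show).wait()
    print(current_thread().name)  # 'MainThread' via the fallback path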

View File

@ -0,0 +1,6 @@
__time = __import__('time')
from eventlet.patcher import slurp_properties
__patched__ = ['sleep']
slurp_properties(__time, globals(), ignore=__patched__, srckeys=dir(__time))
from eventlet.greenthread import sleep
sleep # silence pyflakes
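
# A tiny sketch: sleep() here is eventlet.greenthread.sleep, so it suspends
# only the calling greenthread while the hub keeps running the others.
if __name__ == '__main__':
    import eventlet
    eventlet.spawn(print, 'another greenthread runs during sleep')
    sleep(0.1)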

View File

@ -0,0 +1,5 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import time
from eventlet.green import httplib
from eventlet.green import ftplib

View File

@ -0,0 +1,4 @@
from eventlet import patcher
from eventlet.green.urllib import response
patcher.inject('urllib.error', globals(), ('urllib.response', response))
del patcher

View File

@ -0,0 +1,3 @@
from eventlet import patcher
patcher.inject('urllib.parse', globals())
del patcher

View File

@ -0,0 +1,50 @@
from eventlet import patcher
from eventlet.green import ftplib, http, os, socket, time
from eventlet.green.http import client as http_client
from eventlet.green.urllib import error, parse, response
# TODO should we also have green email version?
# import email
to_patch = [
# This (http module) is needed here, otherwise test__greenness hangs
# forever on Python 3 because parts of non-green http (including
# http.client) leak into our patched urllib.request. There may be a nicer
# way to handle this (I didn't dig too deep) but this does the job. Jakub
('http', http),
('http.client', http_client),
('os', os),
('socket', socket),
('time', time),
('urllib.error', error),
('urllib.parse', parse),
('urllib.response', response),
]
try:
from eventlet.green import ssl
except ImportError:
pass
else:
to_patch.append(('ssl', ssl))
patcher.inject('urllib.request', globals(), *to_patch)
del to_patch
to_patch_in_functions = [('ftplib', ftplib)]
del ftplib
FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions)
URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions)
ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions)
ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions)
ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions)
del error
del parse
del response
del to_patch_in_functions

View File

@ -0,0 +1,3 @@
from eventlet import patcher
patcher.inject('urllib.response', globals())
del patcher

View File

@ -0,0 +1,20 @@
from eventlet import patcher
from eventlet.green import ftplib
from eventlet.green import httplib
from eventlet.green import socket
from eventlet.green import ssl
from eventlet.green import time
from eventlet.green import urllib
patcher.inject(
'urllib2',
globals(),
('httplib', httplib),
('socket', socket),
('ssl', ssl),
('time', time),
('urllib', urllib))
FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, ('ftplib', ftplib))
del patcher

View File

@ -0,0 +1,465 @@
"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context`
found in :mod:`pyzmq <zmq>` to be non-blocking.
"""
__zmq__ = __import__('zmq')
import eventlet.hubs
from eventlet.patcher import slurp_properties
from eventlet.support import greenlets as greenlet
__patched__ = ['Context', 'Socket']
slurp_properties(__zmq__, globals(), ignore=__patched__)
from collections import deque
try:
# alias XREQ/XREP to DEALER/ROUTER if available
if not hasattr(__zmq__, 'XREQ'):
XREQ = DEALER
if not hasattr(__zmq__, 'XREP'):
XREP = ROUTER
except NameError:
pass
class LockReleaseError(Exception):
pass
class _QueueLock:
"""A Lock that can be acquired by at most one thread. Any other
thread calling acquire will be blocked in a queue. When release
is called, the threads are awoken in the order they blocked,
    one at a time. This lock can be acquired recursively by the same
thread."""
def __init__(self):
self._waiters = deque()
self._count = 0
self._holder = None
self._hub = eventlet.hubs.get_hub()
def __nonzero__(self):
return bool(self._count)
__bool__ = __nonzero__
def __enter__(self):
self.acquire()
def __exit__(self, type, value, traceback):
self.release()
def acquire(self):
current = greenlet.getcurrent()
if (self._waiters or self._count > 0) and self._holder is not current:
# block until lock is free
self._waiters.append(current)
self._hub.switch()
w = self._waiters.popleft()
assert w is current, 'Waiting threads woken out of order'
assert self._count == 0, 'After waking a thread, the lock must be unacquired'
self._holder = current
self._count += 1
def release(self):
if self._count <= 0:
raise LockReleaseError("Cannot release unacquired lock")
self._count -= 1
if self._count == 0:
self._holder = None
if self._waiters:
# wake next
self._hub.schedule_call_global(0, self._waiters[0].switch)
class _BlockedThread:
"""Is either empty, or represents a single blocked thread that
blocked itself by calling the block() method. The thread can be
    awoken by calling wake().  wake() can be called multiple times;
all but the first call will have no effect."""
def __init__(self):
self._blocked_thread = None
self._wakeupper = None
self._hub = eventlet.hubs.get_hub()
def __nonzero__(self):
return self._blocked_thread is not None
__bool__ = __nonzero__
def block(self, deadline=None):
if self._blocked_thread is not None:
raise Exception("Cannot block more than one thread on one BlockedThread")
self._blocked_thread = greenlet.getcurrent()
if deadline is not None:
self._hub.schedule_call_local(deadline - self._hub.clock(), self.wake)
try:
self._hub.switch()
finally:
self._blocked_thread = None
# cleanup the wakeup task
if self._wakeupper is not None:
# Important to cancel the wakeup task so it doesn't
# spuriously wake this greenthread later on.
self._wakeupper.cancel()
self._wakeupper = None
def wake(self):
"""Schedules the blocked thread to be awoken and return
True. If wake has already been called or if there is no
blocked thread, then this call has no effect and returns
False."""
if self._blocked_thread is not None and self._wakeupper is None:
self._wakeupper = self._hub.schedule_call_global(0, self._blocked_thread.switch)
return True
return False
class Context(__zmq__.Context):
"""Subclass of :class:`zmq.Context`
"""
def socket(self, socket_type):
"""Overridden method to ensure that the green version of socket is used
Behaves the same as :meth:`zmq.Context.socket`, but ensures
that a :class:`Socket` with all of its send and recv methods set to be
non-blocking is returned
"""
if self.closed:
raise ZMQError(ENOTSUP)
return Socket(self, socket_type)
def _wraps(source_fn):
"""A decorator that copies the __name__ and __doc__ from the given
function
"""
def wrapper(dest_fn):
dest_fn.__name__ = source_fn.__name__
dest_fn.__doc__ = source_fn.__doc__
return dest_fn
return wrapper
# Implementation notes: Each socket in 0mq contains a pipe that the
# background IO threads use to communicate with the socket. These
# events are important because they tell the socket when it is able to
# send and when it has messages waiting to be received. The read end
# of the events pipe is the same FD that getsockopt(zmq.FD) returns.
#
# Events are read from the socket's event pipe only on the thread that
# the 0mq context is associated with, which is the native thread the
# greenthreads are running on, and the only operations that cause the
# events to be read and processed are send(), recv() and
# getsockopt(zmq.EVENTS). This means that after doing any of these
# three operations, the ability of the socket to send or receive a
# message without blocking may have changed, but after the events are
# read the FD is no longer readable so the hub may not signal our
# listener.
#
# If we understand that after calling send() a message might be ready
# to be received and that after calling recv() a message might be able
# to be sent, what should we do next? There are two approaches:
#
# 1. Always wake the other thread if there is one waiting. This
# wakeup may be spurious because the socket might not actually be
# ready for a send() or recv(). However, if a thread is in a
# tight-loop successfully calling send() or recv() then the wakeups
# are naturally batched and there's very little cost added to each
# send/recv call.
#
# or
#
# 2. Call getsockopt(zmq.EVENTS) and explicitly check if the other
# thread should be woken up. This avoids spurious wake-ups but may
# add overhead because getsockopt will cause all events to be
# processed, whereas send and recv throttle processing
# events. Admittedly, all of the events will need to be processed
# eventually, but it is likely faster to batch the processing.
#
# Which approach is better? I have no idea.
#
# TODO:
# - Support MessageTrackers and make MessageTracker.wait green
_Socket = __zmq__.Socket
_Socket_recv = _Socket.recv
_Socket_send = _Socket.send
_Socket_send_multipart = _Socket.send_multipart
_Socket_recv_multipart = _Socket.recv_multipart
_Socket_send_string = _Socket.send_string
_Socket_recv_string = _Socket.recv_string
_Socket_send_pyobj = _Socket.send_pyobj
_Socket_recv_pyobj = _Socket.recv_pyobj
_Socket_send_json = _Socket.send_json
_Socket_recv_json = _Socket.recv_json
_Socket_getsockopt = _Socket.getsockopt
class Socket(_Socket):
"""Green version of :class:``zmq.core.socket.Socket``.
The following three methods are always overridden:
* send
* recv
* getsockopt
To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
is deferred to the hub (using :func:``eventlet.hubs.trampoline``) if a
``zmq.EAGAIN`` (retry) error is raised.
For some socket types, the following methods are also overridden:
* send_multipart
* recv_multipart
"""
def __init__(self, context, socket_type):
super().__init__(context, socket_type)
self.__dict__['_eventlet_send_event'] = _BlockedThread()
self.__dict__['_eventlet_recv_event'] = _BlockedThread()
self.__dict__['_eventlet_send_lock'] = _QueueLock()
self.__dict__['_eventlet_recv_lock'] = _QueueLock()
def event(fd):
# Some events arrived at the zmq socket. This may mean
# there's a message that can be read or there's space for
# a message to be written.
send_wake = self._eventlet_send_event.wake()
recv_wake = self._eventlet_recv_event.wake()
if not send_wake and not recv_wake:
# if no waiting send or recv thread was woken up, then
# force the zmq socket's events to be processed to
# avoid repeated wakeups
_Socket_getsockopt(self, EVENTS)
hub = eventlet.hubs.get_hub()
self.__dict__['_eventlet_listener'] = hub.add(hub.READ,
self.getsockopt(FD),
event,
lambda _: None,
lambda: None)
self.__dict__['_eventlet_clock'] = hub.clock
@_wraps(_Socket.close)
def close(self, linger=None):
super().close(linger)
if self._eventlet_listener is not None:
eventlet.hubs.get_hub().remove(self._eventlet_listener)
self.__dict__['_eventlet_listener'] = None
# wake any blocked threads
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
@_wraps(_Socket.getsockopt)
def getsockopt(self, option):
result = _Socket_getsockopt(self, option)
if option == EVENTS:
# Getting the events causes the zmq socket to process
# events which may mean a msg can be sent or received. If
# there is a greenthread blocked and waiting for events,
# it will miss the edge-triggered read event, so wake it
# up.
if (result & POLLOUT):
self._eventlet_send_event.wake()
if (result & POLLIN):
self._eventlet_recv_event.wake()
return result
@_wraps(_Socket.send)
def send(self, msg, flags=0, copy=True, track=False):
"""A send method that's safe to use when multiple greenthreads
are calling send, send_multipart, recv and recv_multipart on
the same socket.
"""
if flags & NOBLOCK:
result = _Socket_send(self, msg, flags, copy, track)
# Instead of calling both wake methods, could call
# self.getsockopt(EVENTS) which would trigger wakeups if
# needed.
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
return result
# TODO: pyzmq will copy the message buffer and create Message
# objects under some circumstances. We could do that work here
# once to avoid doing it every time the send is retried.
flags |= NOBLOCK
with self._eventlet_send_lock:
while True:
try:
return _Socket_send(self, msg, flags, copy, track)
except ZMQError as e:
if e.errno == EAGAIN:
self._eventlet_send_event.block()
else:
raise
finally:
# The call to send processes 0mq events and may
# make the socket ready to recv. Wake the next
# receiver. (Could check EVENTS for POLLIN here)
self._eventlet_recv_event.wake()
@_wraps(_Socket.send_multipart)
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
"""A send_multipart method that's safe to use when multiple
greenthreads are calling send, send_multipart, recv and
recv_multipart on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_multipart(self, msg_parts, flags, copy, track)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_multipart(self, msg_parts, flags, copy, track)
@_wraps(_Socket.send_string)
def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
"""A send_string method that's safe to use when multiple
greenthreads are calling send, send_string, recv and
recv_string on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_string(self, u, flags, copy, encoding)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_string(self, u, flags, copy, encoding)
@_wraps(_Socket.send_pyobj)
def send_pyobj(self, obj, flags=0, protocol=2):
"""A send_pyobj method that's safe to use when multiple
greenthreads are calling send, send_pyobj, recv and
recv_pyobj on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_pyobj(self, obj, flags, protocol)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_pyobj(self, obj, flags, protocol)
@_wraps(_Socket.send_json)
def send_json(self, obj, flags=0, **kwargs):
"""A send_json method that's safe to use when multiple
greenthreads are calling send, send_json, recv and
recv_json on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_json(self, obj, flags, **kwargs)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_json(self, obj, flags, **kwargs)
@_wraps(_Socket.recv)
def recv(self, flags=0, copy=True, track=False):
"""A recv method that's safe to use when multiple greenthreads
are calling send, send_multipart, recv and recv_multipart on
the same socket.
"""
if flags & NOBLOCK:
msg = _Socket_recv(self, flags, copy, track)
# Instead of calling both wake methods, could call
# self.getsockopt(EVENTS) which would trigger wakeups if
# needed.
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
return msg
deadline = None
if hasattr(__zmq__, 'RCVTIMEO'):
sock_timeout = self.getsockopt(__zmq__.RCVTIMEO)
if sock_timeout == -1:
pass
elif sock_timeout > 0:
deadline = self._eventlet_clock() + sock_timeout / 1000.0
else:
raise ValueError(sock_timeout)
flags |= NOBLOCK
with self._eventlet_recv_lock:
while True:
try:
return _Socket_recv(self, flags, copy, track)
except ZMQError as e:
if e.errno == EAGAIN:
# zmq in its wisdom decided to reuse EAGAIN for timeouts
if deadline is not None and self._eventlet_clock() > deadline:
e.is_timeout = True
raise
self._eventlet_recv_event.block(deadline=deadline)
else:
raise
finally:
# The call to recv processes 0mq events and may
# make the socket ready to send. Wake the next
                    # sender. (Could check EVENTS for POLLOUT here)
self._eventlet_send_event.wake()
@_wraps(_Socket.recv_multipart)
def recv_multipart(self, flags=0, copy=True, track=False):
"""A recv_multipart method that's safe to use when multiple
greenthreads are calling send, send_multipart, recv and
recv_multipart on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_multipart(self, flags, copy, track)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_multipart(self, flags, copy, track)
@_wraps(_Socket.recv_string)
def recv_string(self, flags=0, encoding='utf-8'):
"""A recv_string method that's safe to use when multiple
greenthreads are calling send, send_string, recv and
recv_string on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_string(self, flags, encoding)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_string(self, flags, encoding)
@_wraps(_Socket.recv_json)
def recv_json(self, flags=0, **kwargs):
"""A recv_json method that's safe to use when multiple
greenthreads are calling send, send_json, recv and
recv_json on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_json(self, flags, **kwargs)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_json(self, flags, **kwargs)
@_wraps(_Socket.recv_pyobj)
def recv_pyobj(self, flags=0):
"""A recv_pyobj method that's safe to use when multiple
greenthreads are calling send, send_pyobj, recv and
recv_pyobj on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_pyobj(self, flags)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_pyobj(self, flags)
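
# A compact sketch of the green wrappers: two PAIR sockets over inproc, where
# recv() blocks only the calling greenthread and the hub wakes it when the
# message arrives.  The endpoint name is an illustrative assumption.
if __name__ == '__main__':
    import eventlet
    ctx = Context()
    a = ctx.socket(PAIR)
    b = ctx.socket(PAIR)
    a.bind('inproc://demo')
    b.connect('inproc://demo')

    def sender():
        eventlet.sleep(0.1)  # let the receiver block on recv() first
        a.send(b'hello')

    eventlet.spawn(sender)
    print(b.recv())  # yields to the hub until b'hello' arrives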