/usr/lib/python3/dist-packages/eventlet/tpool.py is part of the python3-eventlet 0.19.0-6 package.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
# Copyright (c) 2007-2009, Linden Research, Inc.
# Copyright (c) 2007, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import imp
import os
import sys
import traceback
import eventlet
from eventlet import event, greenio, greenthread, patcher, timeout
from eventlet.support import six
__all__ = ['execute', 'Proxy', 'killall', 'set_num_threads']
EXC_CLASSES = (Exception, timeout.Timeout)
SYS_EXCS = (GeneratorExit, KeyboardInterrupt, SystemExit)
QUIET = True
socket = patcher.original('socket')
threading = patcher.original('threading')
if six.PY2:
    Queue_module = patcher.original('Queue')
if six.PY3:
    Queue_module = patcher.original('queue')
Empty = Queue_module.Empty
Queue = Queue_module.Queue
_bytetosend = b' '
_coro = None
_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20))
_reqq = _rspq = None
_rsock = _wsock = None
_setup_already = False
_threads = []


def tpool_trampoline():
    global _rspq
    while True:
        try:
            _c = _rsock.recv(1)
            assert _c
            # FIXME: this is probably redundant since using sockets instead of pipe now
        except ValueError:
            break  # will be raised when pipe is closed
        while not _rspq.empty():
            try:
                (e, rv) = _rspq.get(block=False)
                e.send(rv)
                e = rv = None
            except Empty:
                pass


def tworker():
    global _rspq
    while True:
        try:
            msg = _reqq.get()
        except AttributeError:
            return  # can't get anything off of a dud queue
        if msg is None:
            return
        (e, meth, args, kwargs) = msg
        rv = None
        try:
            rv = meth(*args, **kwargs)
        except SYS_EXCS:
            raise
        except EXC_CLASSES:
            rv = sys.exc_info()
            # test_leakage_from_tracebacks verifies that the use of
            # exc_info does not lead to memory leaks
        _rspq.put((e, rv))
        msg = meth = args = kwargs = e = rv = None
        _wsock.sendall(_bytetosend)


def execute(meth, *args, **kwargs):
    """
    Execute *meth* in a Python thread, blocking the current coroutine/
    greenthread until the method completes.

    The primary use case for this is to wrap an object or module that is not
    amenable to monkeypatching or any of the other tricks that Eventlet uses
    to achieve cooperative yielding.  With tpool, you can force such objects to
    cooperate with green threads by sticking them in native threads, at the cost
    of some overhead.
    """
    setup()
    # if already in tpool, don't recurse into the tpool
    # also, call functions directly if we're inside an import lock, because
    # if meth does any importing (sadly common), it will hang
    my_thread = threading.currentThread()
    if my_thread in _threads or imp.lock_held() or _nthreads == 0:
        return meth(*args, **kwargs)

    e = event.Event()
    _reqq.put((e, meth, args, kwargs))

    rv = e.wait()
    if isinstance(rv, tuple) \
            and len(rv) == 3 \
            and isinstance(rv[1], EXC_CLASSES):
        (c, e, tb) = rv
        if not QUIET:
            traceback.print_exception(c, e, tb)
            traceback.print_stack()
        six.reraise(c, e, tb)
    return rv
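
# Illustrative usage sketch for execute(); this comment block is added for
# documentation and is not part of the packaged module.  The helper name and
# the path below are hypothetical, not eventlet API:
#
#     from eventlet import tpool
#
#     def read_blocking(path):
#         with open(path, 'rb') as f:
#             return f.read()    # ordinary blocking I/O, runs in a native thread
#
#     data = tpool.execute(read_blocking, '/var/tmp/example.bin')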


def proxy_call(autowrap, f, *args, **kwargs):
    """
    Call a function *f* and return its value.  If the type of the return value
    is in the *autowrap* collection, then it is wrapped in a :class:`Proxy`
    object before return.

    Normally *f* will be called in the threadpool with :func:`execute`; if the
    keyword argument "nonblocking" is set to ``True``, it will simply be
    executed directly.  This is useful if you have an object which has methods
    that don't need to be called in a separate thread, but which return objects
    that should be Proxy wrapped.
    """
    if kwargs.pop('nonblocking', False):
        rv = f(*args, **kwargs)
    else:
        rv = execute(f, *args, **kwargs)
    if isinstance(rv, autowrap):
        return Proxy(rv, autowrap)
    else:
        return rv
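
# Behavior sketch for proxy_call(), added here as commentary only; ``some_obj``,
# ``SomeResult`` and the method names are hypothetical placeholders, not
# eventlet names:
#
#     rv = proxy_call((), some_obj.cheap_method, nonblocking=True)  # runs inline
#     rv = proxy_call((SomeResult,), some_obj.heavy_method)
#     # heavy_method ran in the thread pool; if it returned a SomeResult
#     # instance, rv comes back wrapped in a Proxy.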


class Proxy(object):
    """
    a simple proxy-wrapper of any object that comes with a
    methods-only interface, in order to forward every method
    invocation onto a thread in the native-thread pool.  A key
    restriction is that the object's methods should not switch
    greenlets or use Eventlet primitives, since they are in a
    different thread from the main hub, and therefore might behave
    unexpectedly.  This is for running native-threaded code
    only.

    It's common to want to have some of the attributes or return
    values also wrapped in Proxy objects (for example, database
    connection objects produce cursor objects which also should be
    wrapped in Proxy objects to remain nonblocking).  *autowrap*, if
    supplied, is a collection of types; if an attribute or return
    value matches one of those types (via isinstance), it will be
    wrapped in a Proxy.  *autowrap_names* is a collection
    of strings, which represent the names of attributes that should be
    wrapped in Proxy objects when accessed.
    """
    def __init__(self, obj, autowrap=(), autowrap_names=()):
        self._obj = obj
        self._autowrap = autowrap
        self._autowrap_names = autowrap_names

    def __getattr__(self, attr_name):
        f = getattr(self._obj, attr_name)
        if not hasattr(f, '__call__'):
            if isinstance(f, self._autowrap) or attr_name in self._autowrap_names:
                return Proxy(f, self._autowrap)
            return f

        def doit(*args, **kwargs):
            result = proxy_call(self._autowrap, f, *args, **kwargs)
            if attr_name in self._autowrap_names and not isinstance(result, Proxy):
                return Proxy(result)
            return result
        return doit
    # the following are a bunch of methods that the python interpreter
    # doesn't use getattr to retrieve and therefore have to be defined
    # explicitly
    def __getitem__(self, key):
        return proxy_call(self._autowrap, self._obj.__getitem__, key)

    def __setitem__(self, key, value):
        return proxy_call(self._autowrap, self._obj.__setitem__, key, value)

    def __deepcopy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__deepcopy__, memo)

    def __copy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__copy__, memo)

    def __call__(self, *a, **kw):
        if '__call__' in self._autowrap_names:
            return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw))
        else:
            return proxy_call(self._autowrap, self._obj, *a, **kw)

    def __enter__(self):
        return proxy_call(self._autowrap, self._obj.__enter__)

    def __exit__(self, *exc):
        return proxy_call(self._autowrap, self._obj.__exit__, *exc)

    # these don't go through a proxy call, because they're likely to
    # be called often, and are unlikely to be implemented on the
    # wrapped object in such a way that they would block
    def __eq__(self, rhs):
        return self._obj == rhs

    def __hash__(self):
        return self._obj.__hash__()

    def __repr__(self):
        return self._obj.__repr__()

    def __str__(self):
        return self._obj.__str__()

    def __len__(self):
        return len(self._obj)

    def __nonzero__(self):
        return bool(self._obj)
    # Python3
    __bool__ = __nonzero__

    def __iter__(self):
        it = iter(self._obj)
        if it == self._obj:
            return self
        else:
            return Proxy(it)

    def next(self):
        return proxy_call(self._autowrap, next, self._obj)
    # Python3
    __next__ = next
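
# Illustrative Proxy usage, added as commentary only.  It assumes the standard
# sqlite3 module as the wrapped blocking library; the choice of sqlite3 and the
# database path are examples, not part of this module:
#
#     import sqlite3
#     from eventlet import tpool
#
#     raw = sqlite3.connect('/tmp/example.db')
#     conn = tpool.Proxy(raw, autowrap_names=('cursor', 'execute'))
#     cur = conn.cursor()        # returned cursor is itself Proxy-wrapped
#     cur.execute('SELECT 1')    # runs in the native thread pool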


def setup():
    global _rsock, _wsock, _coro, _setup_already, _rspq, _reqq
    if _setup_already:
        return
    else:
        _setup_already = True

    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn("Zero threads in tpool. All tpool.execute calls will "
                      "execute in main thread. Check the value of the environment "
                      "variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
    _reqq = Queue(maxsize=-1)
    _rspq = Queue(maxsize=-1)

    # connected socket pair
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    sock.listen(1)
    csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    csock.connect(sock.getsockname())
    csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    _wsock, _addr = sock.accept()
    _wsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    sock.close()
    _rsock = greenio.GreenSocket(csock)

    for i in six.moves.range(_nthreads):
        t = threading.Thread(target=tworker,
                             name="tpool_thread_%s" % i)
        t.setDaemon(True)
        t.start()
        _threads.append(t)

    _coro = greenthread.spawn_n(tpool_trampoline)
    # This yield fixes subtle error with GreenSocket.__del__
    eventlet.sleep(0)


# Avoid ResourceWarning unclosed socket on Python3.2+
@atexit.register
def killall():
    global _setup_already, _rspq, _rsock, _wsock
    if not _setup_already:
        return

    # This yield fixes freeze in some scenarios
    eventlet.sleep(0)

    for thr in _threads:
        _reqq.put(None)
    for thr in _threads:
        thr.join()
    del _threads[:]

    # return any remaining results
    while (_rspq is not None) and not _rspq.empty():
        try:
            (e, rv) = _rspq.get(block=False)
            e.send(rv)
            e = rv = None
        except Empty:
            pass

    if _coro is not None:
        greenthread.kill(_coro)
    if _rsock is not None:
        _rsock.close()
        _rsock = None
    if _wsock is not None:
        _wsock.close()
        _wsock = None
    _rspq = None
    _setup_already = False


def set_num_threads(nthreads):
    global _nthreads
    _nthreads = nthreads
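
# Sizing sketch, added as commentary only: the pool size can be set either via
# the environment variable read at import time or programmatically before the
# pool is first used.  The value 40 is an arbitrary example:
#
#     # EVENTLET_THREADPOOL_SIZE=40 python app.py
#     # or, before any tpool.execute()/Proxy call:
#     from eventlet import tpool
#     tpool.set_num_threads(40)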