From 60aa5581c1b610c8d0250fe97dea0c1505f51966 Mon Sep 17 00:00:00 2001 From: Strahinja Val Markovic Date: Sun, 6 Oct 2013 18:24:52 -0700 Subject: [PATCH] Adding python-futures to third_party Download source: https://pythonfutures.googlecode.com/archive/05e0c9c1b3d493f0c7e5833723a1ea99d024bad4.zip --- third_party/pythonfutures/CHANGES | 44 ++ third_party/pythonfutures/LICENSE | 21 + .../pythonfutures/concurrent/__init__.py | 3 + .../concurrent/futures/__init__.py | 18 + .../pythonfutures/concurrent/futures/_base.py | 574 ++++++++++++++ .../concurrent/futures/_compat.py | 101 +++ .../concurrent/futures/process.py | 363 +++++++++ .../concurrent/futures/thread.py | 138 ++++ third_party/pythonfutures/crawl.py | 74 ++ third_party/pythonfutures/docs/conf.py | 194 +++++ third_party/pythonfutures/docs/index.rst | 345 +++++++++ third_party/pythonfutures/docs/make.bat | 112 +++ third_party/pythonfutures/futures/__init__.py | 24 + third_party/pythonfutures/futures/process.py | 1 + third_party/pythonfutures/futures/thread.py | 1 + third_party/pythonfutures/primes.py | 50 ++ third_party/pythonfutures/setup.cfg | 6 + third_party/pythonfutures/setup.py | 33 + third_party/pythonfutures/test_futures.py | 723 ++++++++++++++++++ third_party/pythonfutures/tox.ini | 8 + 20 files changed, 2833 insertions(+) create mode 100755 third_party/pythonfutures/CHANGES create mode 100755 third_party/pythonfutures/LICENSE create mode 100755 third_party/pythonfutures/concurrent/__init__.py create mode 100755 third_party/pythonfutures/concurrent/futures/__init__.py create mode 100755 third_party/pythonfutures/concurrent/futures/_base.py create mode 100755 third_party/pythonfutures/concurrent/futures/_compat.py create mode 100755 third_party/pythonfutures/concurrent/futures/process.py create mode 100755 third_party/pythonfutures/concurrent/futures/thread.py create mode 100755 third_party/pythonfutures/crawl.py create mode 100755 third_party/pythonfutures/docs/conf.py create mode 100755 third_party/pythonfutures/docs/index.rst create mode 100755 third_party/pythonfutures/docs/make.bat create mode 100755 third_party/pythonfutures/futures/__init__.py create mode 100755 third_party/pythonfutures/futures/process.py create mode 100755 third_party/pythonfutures/futures/thread.py create mode 100755 third_party/pythonfutures/primes.py create mode 100755 third_party/pythonfutures/setup.cfg create mode 100755 third_party/pythonfutures/setup.py create mode 100755 third_party/pythonfutures/test_futures.py create mode 100755 third_party/pythonfutures/tox.ini diff --git a/third_party/pythonfutures/CHANGES b/third_party/pythonfutures/CHANGES new file mode 100755 index 00000000..81df636f --- /dev/null +++ b/third_party/pythonfutures/CHANGES @@ -0,0 +1,44 @@ +2.1.4 +===== + +- Ported the library again from Python 3.2.5 to get the latest bug fixes + + +2.1.3 +===== + +- Fixed race condition in wait(return_when=ALL_COMPLETED) + (http://bugs.python.org/issue14406) -- thanks Ralf Schmitt +- Added missing setUp() methods to several test classes + + +2.1.2 +===== + +- Fixed installation problem on Python 3.1 + + +2.1.1 +===== + +- Fixed missing 'concurrent' package declaration in setup.py + + +2.1 +=== + +- Moved the code from the 'futures' package to 'concurrent.futures' to provide + a drop in backport that matches the code in Python 3.2 standard library +- Deprecated the old 'futures' package + + +2.0 +=== + +- Changed implementation to match PEP 3148 + + +1.0 +=== + +Initial release. 
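For reviewers unfamiliar with the backport: once third_party/pythonfutures is importable, the package provides the Python 3.2 concurrent.futures API on Python 2. A minimal sketch of the intended usage (the pool size and the squaring function are illustrative, not taken from this patch):

    from concurrent.futures import ThreadPoolExecutor

    def square(x):
        return x * x

    # Submit work to a small thread pool and block on the result. The
    # with-statement calls executor.shutdown(wait=True) on exit.
    with ThreadPoolExecutor(max_workers=2) as executor:
        future = executor.submit(square, 7)
        print(future.result())  # 49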
diff --git a/third_party/pythonfutures/LICENSE b/third_party/pythonfutures/LICENSE
new file mode 100755
index 00000000..c430db0f
--- /dev/null
+++ b/third_party/pythonfutures/LICENSE
@@ -0,0 +1,21 @@
+Copyright 2009 Brian Quinlan. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+   2. Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY BRIAN QUINLAN "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+SHALL THE FREEBSD PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/third_party/pythonfutures/concurrent/__init__.py b/third_party/pythonfutures/concurrent/__init__.py
new file mode 100755
index 00000000..b36383a6
--- /dev/null
+++ b/third_party/pythonfutures/concurrent/__init__.py
@@ -0,0 +1,3 @@
+from pkgutil import extend_path
+
+__path__ = extend_path(__path__, __name__)
diff --git a/third_party/pythonfutures/concurrent/futures/__init__.py b/third_party/pythonfutures/concurrent/futures/__init__.py
new file mode 100755
index 00000000..b5231f8a
--- /dev/null
+++ b/third_party/pythonfutures/concurrent/futures/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Execute computations asynchronously using threads or processes."""
+
+__author__ = 'Brian Quinlan (brian@sweetapp.com)'
+
+from concurrent.futures._base import (FIRST_COMPLETED,
+                                      FIRST_EXCEPTION,
+                                      ALL_COMPLETED,
+                                      CancelledError,
+                                      TimeoutError,
+                                      Future,
+                                      Executor,
+                                      wait,
+                                      as_completed)
+from concurrent.futures.process import ProcessPoolExecutor
+from concurrent.futures.thread import ThreadPoolExecutor
diff --git a/third_party/pythonfutures/concurrent/futures/_base.py b/third_party/pythonfutures/concurrent/futures/_base.py
new file mode 100755
index 00000000..8ed69b7d
--- /dev/null
+++ b/third_party/pythonfutures/concurrent/futures/_base.py
@@ -0,0 +1,574 @@
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+from __future__ import with_statement
+import logging
+import threading
+import time
+
+try:
+    from collections import namedtuple
+except ImportError:
+    from concurrent.futures._compat import namedtuple
+
+__author__ = 'Brian Quinlan (brian@sweetapp.com)'
+
+FIRST_COMPLETED = 'FIRST_COMPLETED'
+FIRST_EXCEPTION = 'FIRST_EXCEPTION'
+ALL_COMPLETED = 'ALL_COMPLETED'
+_AS_COMPLETED = '_AS_COMPLETED'
+
+# Possible future states (for internal use by the futures package).
+PENDING = 'PENDING'
+RUNNING = 'RUNNING'
+# The future was cancelled by the user...
+CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_FUTURE_STATES = [ + PENDING, + RUNNING, + CANCELLED, + CANCELLED_AND_NOTIFIED, + FINISHED +] + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. +LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super(_FirstCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + super(_FirstCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super(_FirstCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + super(_AllCompletedWaiter, self).__init__() + + def _decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super(_AllCompletedWaiter, self).add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super(_AllCompletedWaiter, self).add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super(_AllCompletedWaiter, self).add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() 
+ elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in fs: + f._waiters.append(waiter) + + return waiter + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + """ + if timeout is not None: + end_time = timeout + time.time() + + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = set(fs) - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + + try: + for future in finished: + yield future + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.time() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), len(fs))) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + for future in finished: + yield future + pending.remove(future) + + finally: + for f in fs: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. + FIRST_EXCEPTION - Return when any future finishes by raising an + exception. If no future raises an exception + then it is equivalent to ALL_COMPLETED. + ALL_COMPLETED - Return when all futures finish or are cancelled. + + Returns: + A named 2-tuple of sets. The first set, named 'done', contains the + futures that completed (is finished or cancelled) before the wait + completed. The second set, named 'not_done', contains uncompleted + futures. 
+ """ + with _AcquireFutures(fs): + done = set(f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + not_done = set(fs) - done + + if (return_when == FIRST_COMPLETED) and done: + return DoneAndNotDoneFutures(done, not_done) + elif (return_when == FIRST_EXCEPTION) and done: + if any(f for f in done + if not f.cancelled() and f.exception() is not None): + return DoneAndNotDoneFutures(done, not_done) + + if len(done) == len(fs): + return DoneAndNotDoneFutures(done, not_done) + + waiter = _create_and_install_waiters(fs, return_when) + + waiter.event.wait(timeout) + for f in fs: + f._waiters.remove(waiter) + + done.update(waiter.finished_futures) + return DoneAndNotDoneFutures(done, set(fs) - done) + +class Future(object): + """Represents the result of an asynchronous computation.""" + + def __init__(self): + """Initializes the future. Should not be called by clients.""" + self._condition = threading.Condition() + self._state = PENDING + self._result = None + self._exception = None + self._waiters = [] + self._done_callbacks = [] + + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + + def __repr__(self): + with self._condition: + if self._state == FINISHED: + if self._exception: + return '' % ( + hex(id(self)), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._exception.__class__.__name__) + else: + return '' % ( + hex(id(self)), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._result.__class__.__name__) + return '' % ( + hex(id(self)), + _STATE_TO_DESCRIPTION_MAP[self._state]) + + def cancel(self): + """Cancel the future if possible. + + Returns True if the future was cancelled, False otherwise. A future + cannot be cancelled if it is running or has already completed. + """ + with self._condition: + if self._state in [RUNNING, FINISHED]: + return False + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + return True + + self._state = CANCELLED + self._condition.notify_all() + + self._invoke_callbacks() + return True + + def cancelled(self): + """Return True if the future has cancelled.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] + + def running(self): + """Return True if the future is currently executing.""" + with self._condition: + return self._state == RUNNING + + def done(self): + """Return True of the future was cancelled or finished executing.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] + + def __get_result(self): + if self._exception: + raise self._exception + else: + return self._result + + def add_done_callback(self, fn): + """Attaches a callable that will be called when the future finishes. + + Args: + fn: A callable that will be called with this future as its only + argument when the future completes or is cancelled. The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + fn(self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. 
+                If None, then there is no limit on the wait time.
+
+        Returns:
+            The result of the call that the future represents.
+
+        Raises:
+            CancelledError: If the future was cancelled.
+            TimeoutError: If the future didn't finish executing before the
+                given timeout.
+            Exception: If the call raised then that exception will be raised.
+        """
+        with self._condition:
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                raise CancelledError()
+            elif self._state == FINISHED:
+                return self.__get_result()
+
+            self._condition.wait(timeout)
+
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                raise CancelledError()
+            elif self._state == FINISHED:
+                return self.__get_result()
+            else:
+                raise TimeoutError()
+
+    def exception(self, timeout=None):
+        """Return the exception raised by the call that the future represents.
+
+        Args:
+            timeout: The number of seconds to wait for the exception if the
+                future isn't done. If None, then there is no limit on the wait
+                time.
+
+        Returns:
+            The exception raised by the call that the future represents or None
+            if the call completed without raising.
+
+        Raises:
+            CancelledError: If the future was cancelled.
+            TimeoutError: If the future didn't finish executing before the
+                given timeout.
+        """
+
+        with self._condition:
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                raise CancelledError()
+            elif self._state == FINISHED:
+                return self._exception
+
+            self._condition.wait(timeout)
+
+            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
+                raise CancelledError()
+            elif self._state == FINISHED:
+                return self._exception
+            else:
+                raise TimeoutError()
+
+    # The following methods should only be used by Executors and in tests.
+    def set_running_or_notify_cancel(self):
+        """Mark the future as running or process any cancel notifications.
+
+        Should only be used by Executor implementations and unit tests.
+
+        If the future has been cancelled (cancel() was called and returned
+        True) then any threads waiting on the future completing (through calls
+        to as_completed() or wait()) are notified and False is returned.
+
+        If the future was not cancelled then it is put in the running state
+        (future calls to running() will return True) and True is returned.
+
+        This method should be called by Executor implementations before
+        executing the work associated with this future. If this method returns
+        False then the work should not be executed.
+
+        Returns:
+            False if the Future was cancelled, True otherwise.
+
+        Raises:
+            RuntimeError: if this method was already called or if set_result()
+                or set_exception() was called.
+        """
+        with self._condition:
+            if self._state == CANCELLED:
+                self._state = CANCELLED_AND_NOTIFIED
+                for waiter in self._waiters:
+                    waiter.add_cancelled(self)
+                # self._condition.notify_all() is not necessary because
+                # self.cancel() triggers a notification.
+                return False
+            elif self._state == PENDING:
+                self._state = RUNNING
+                return True
+            else:
+                LOGGER.critical('Future %s in unexpected state: %s',
+                                id(self),
+                                self._state)
+                raise RuntimeError('Future in unexpected state')
+
+    def set_result(self, result):
+        """Sets the return value of work associated with the future.
+
+        Should only be used by Executor implementations and unit tests.
+        """
+        with self._condition:
+            self._result = result
+            self._state = FINISHED
+            for waiter in self._waiters:
+                waiter.add_result(self)
+            self._condition.notify_all()
+        self._invoke_callbacks()
+
+    def set_exception(self, exception):
+        """Sets the result of the future as being the given exception.
+ + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + self._exception = exception + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(self, fn, *args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. + + Returns: + A Future representing the given call. + """ + raise NotImplementedError() + + def map(self, fn, *iterables, **kwargs): + """Returns a iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + timeout = kwargs.get('timeout') + if timeout is not None: + end_time = timeout + time.time() + + fs = [self.submit(fn, *args) for args in zip(*iterables)] + + try: + for future in fs: + if timeout is None: + yield future.result() + else: + yield future.result(end_time - time.time()) + finally: + for future in fs: + future.cancel() + + def shutdown(self, wait=True): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False diff --git a/third_party/pythonfutures/concurrent/futures/_compat.py b/third_party/pythonfutures/concurrent/futures/_compat.py new file mode 100755 index 00000000..11462326 --- /dev/null +++ b/third_party/pythonfutures/concurrent/futures/_compat.py @@ -0,0 +1,101 @@ +from keyword import iskeyword as _iskeyword +from operator import itemgetter as _itemgetter +import sys as _sys + + +def namedtuple(typename, field_names): + """Returns a new subclass of tuple with named fields. + + >>> Point = namedtuple('Point', 'x y') + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessable by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Parse and validate the field names. Validation serves two purposes, + # generating informative error messages and preventing template injection attacks. 
+ if isinstance(field_names, basestring): + field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas + field_names = tuple(map(str, field_names)) + for name in (typename,) + field_names: + if not all(c.isalnum() or c=='_' for c in name): + raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name) + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a keyword: %r' % name) + if name[0].isdigit(): + raise ValueError('Type names and field names cannot start with a number: %r' % name) + seen_names = set() + for name in field_names: + if name.startswith('_'): + raise ValueError('Field names cannot start with an underscore: %r' % name) + if name in seen_names: + raise ValueError('Encountered duplicate field name: %r' % name) + seen_names.add(name) + + # Create and fill-in the class template + numfields = len(field_names) + argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes + reprtxt = ', '.join('%s=%%r' % name for name in field_names) + dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names)) + template = '''class %(typename)s(tuple): + '%(typename)s(%(argtxt)s)' \n + __slots__ = () \n + _fields = %(field_names)r \n + def __new__(_cls, %(argtxt)s): + return _tuple.__new__(_cls, (%(argtxt)s)) \n + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new %(typename)s object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != %(numfields)d: + raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result)) + return result \n + def __repr__(self): + return '%(typename)s(%(reprtxt)s)' %% self \n + def _asdict(t): + 'Return a new dict which maps field names to their values' + return {%(dicttxt)s} \n + def _replace(_self, **kwds): + 'Return a new %(typename)s object replacing specified fields with new values' + result = _self._make(map(kwds.pop, %(field_names)r, _self)) + if kwds: + raise ValueError('Got unexpected field names: %%r' %% kwds.keys()) + return result \n + def __getnewargs__(self): + return tuple(self) \n\n''' % locals() + for i, name in enumerate(field_names): + template += ' %s = _property(_itemgetter(%d))\n' % (name, i) + + # Execute the template string in a temporary namespace and + # support tracing utilities by setting a value for frame.f_globals['__name__'] + namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename, + _property=property, _tuple=tuple) + try: + exec(template, namespace) + except SyntaxError: + e = _sys.exc_info()[1] + raise SyntaxError(e.message + ':\n' + template) + result = namespace[typename] + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in enviroments where + # sys._getframe is not defined (Jython for example). + if hasattr(_sys, '_getframe'): + result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') + + return result diff --git a/third_party/pythonfutures/concurrent/futures/process.py b/third_party/pythonfutures/concurrent/futures/process.py new file mode 100755 index 00000000..98684f8e --- /dev/null +++ b/third_party/pythonfutures/concurrent/futures/process.py @@ -0,0 +1,363 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. 
+ +The follow diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | => | | => | Call Q | => | | +| | +----------+ | | +-----------+ | | +| | | ... | | | | ... | | | +| | | 6 | | | | 5, call() | | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... | | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). +- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Request Q" +""" + +from __future__ import with_statement +import atexit +import multiprocessing +import threading +import weakref +import sys + +from concurrent.futures import _base + +try: + import queue +except ImportError: + import Queue as queue + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads and processes. This is done to allow the +# interpreter to exit when there are still idle processes in a +# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, +# allowing workers to die with the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads/processes finish. + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). 
+EXTRA_QUEUED_CALLS = 1 + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + +def _process_worker(call_queue, result_queue): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A multiprocessing.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A multiprocessing.Queue of _ResultItems that will written + to by the worker. + shutdown: A multiprocessing.Event that will be set as a signal to the + worker that it should exit when call_queue is empty. + """ + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(None) + return + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except BaseException: + e = sys.exc_info()[1] + result_queue.put(_ResultItem(call_item.work_id, + exception=e)) + else: + result_queue.put(_ResultItem(call_item.work_id, + result=r)) + +def _add_call_item_to_queue(pending_work_items, + work_ids, + call_queue): + """Fills call_queue with _WorkItems from pending_work_items. + + This function never blocks. + + Args: + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids + are consumed and the corresponding _WorkItems from + pending_work_items are transformed into _CallItems and put in + call_queue. + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems. + """ + while True: + if call_queue.full(): + return + try: + work_id = work_ids.get(block=False) + except queue.Empty: + return + else: + work_item = pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del pending_work_items[work_id] + continue + +def _queue_management_worker(executor_reference, + processes, + pending_work_items, + work_ids_queue, + call_queue, + result_queue): + """Manages the communication between this process and the worker processes. + + This function is run in a local thread. + + Args: + executor_reference: A weakref.ref to the ProcessPoolExecutor that owns + this thread. Used to determine if the ProcessPoolExecutor has been + garbage collected and that this function can exit. + process: A list of the multiprocessing.Process instances used as + workers. + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems for processing by the process workers. + result_queue: A multiprocessing.Queue of _ResultItems generated by the + process workers. 
+ """ + nb_shutdown_processes = [0] + def shutdown_one_process(): + """Tell a worker to terminate, which will in turn wake us again""" + call_queue.put(None) + nb_shutdown_processes[0] += 1 + while True: + _add_call_item_to_queue(pending_work_items, + work_ids_queue, + call_queue) + + result_item = result_queue.get(block=True) + if result_item is not None: + work_item = pending_work_items[result_item.work_id] + del pending_work_items[result_item.work_id] + + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + # Check whether we should start shutting down. + executor = executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. + if _shutdown or executor is None or executor._shutdown_thread: + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not pending_work_items: + while nb_shutdown_processes[0] < len(processes): + shutdown_one_process() + # If .join() is not called on the created processes then + # some multiprocessing.Queue methods may deadlock on Mac OS + # X. + for p in processes: + p.join() + call_queue.close() + return + del executor + +_system_limits_checked = False +_system_limited = None +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + import os + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermine limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max + raise NotImplementedError(_system_limited) + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = multiprocessing.cpu_count() + else: + self._max_workers = max_workers + + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + self._call_queue = multiprocessing.Queue(self._max_workers + + EXTRA_QUEUED_CALLS) + self._result_queue = multiprocessing.Queue() + self._work_ids = queue.Queue() + self._queue_management_thread = None + self._processes = set() + + # Shutdown is a two-step process. + self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._queue_count = 0 + self._pending_work_items = {} + + def _start_queue_management_thread(self): + # When the executor gets lost, the weakref callback will wake up + # the queue management thread. 
+ def weakref_cb(_, q=self._result_queue): + q.put(None) + if self._queue_management_thread is None: + self._queue_management_thread = threading.Thread( + target=_queue_management_worker, + args=(weakref.ref(self, weakref_cb), + self._processes, + self._pending_work_items, + self._work_ids, + self._call_queue, + self._result_queue)) + self._queue_management_thread.daemon = True + self._queue_management_thread.start() + _threads_queues[self._queue_management_thread] = self._result_queue + + def _adjust_process_count(self): + for _ in range(len(self._processes), self._max_workers): + p = multiprocessing.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue)) + p.start() + self._processes.add(p) + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._result_queue.put(None) + + self._start_queue_management_thread() + self._adjust_process_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown_thread = True + if self._queue_management_thread: + # Wake up queue management thread + self._result_queue.put(None) + if wait: + self._queue_management_thread.join() + # To reduce the risk of openning too many files, remove references to + # objects that use file descriptors. + self._queue_management_thread = None + self._call_queue = None + self._result_queue = None + self._processes = None + shutdown.__doc__ = _base.Executor.shutdown.__doc__ + +atexit.register(_python_exit) diff --git a/third_party/pythonfutures/concurrent/futures/thread.py b/third_party/pythonfutures/concurrent/futures/thread.py new file mode 100755 index 00000000..a45959d3 --- /dev/null +++ b/third_party/pythonfutures/concurrent/futures/thread.py @@ -0,0 +1,138 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +from __future__ import with_statement +import atexit +import threading +import weakref +import sys + +from concurrent.futures import _base + +try: + import queue +except ImportError: + import Queue as queue + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +# Workers are created as daemon threads. This is done to allow the interpreter +# to exit when there are still idle threads in a ThreadPoolExecutor's thread +# pool (i.e. shutdown() was not called). However, allowing workers to die with +# the interpreter has two undesirable properties: +# - The workers would still be running during interpretor shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads finish. 
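The module comment above is the crux of thread.py's lifetime management: daemon workers plus an atexit hook that wakes each worker with a None sentinel and then joins it. A condensed, standalone sketch of that sentinel-and-join handshake (illustrative names; not part of the vendored file):

    import atexit
    import threading
    try:
        import queue            # Python 3
    except ImportError:
        import Queue as queue   # Python 2

    work_queue = queue.Queue()

    def worker():
        while True:
            item = work_queue.get()
            if item is None:    # sentinel: no more work will arrive
                return
            item()              # run one submitted callable

    t = threading.Thread(target=worker)
    t.daemon = True             # lets the interpreter exit even if atexit is bypassed
    t.start()

    def on_exit():
        work_queue.put(None)    # wake the worker with the sentinel...
        t.join()                # ...then wait for it to drain and finish

    atexit.register(on_exit)

    work_queue.put(lambda: None)  # an example unit of work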
+ +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +atexit.register(_python_exit) + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except BaseException: + e = sys.exc_info()[1] + self.future.set_exception(e) + else: + self.future.set_result(result) + +def _worker(executor_reference, work_queue): + try: + while True: + work_item = work_queue.get(block=True) + if work_item is not None: + work_item.run() + continue + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Notice other workers + work_queue.put(None) + return + del executor + except BaseException: + _base.LOGGER.critical('Exception in worker', exc_info=True) + +class ThreadPoolExecutor(_base.Executor): + def __init__(self, max_workers): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + """ + self._max_workers = max_workers + self._work_queue = queue.Queue() + self._threads = set() + self._shutdown = False + self._shutdown_lock = threading.Lock() + + def submit(self, fn, *args, **kwargs): + with self._shutdown_lock: + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # When the executor gets lost, the weakref callback will wake up + # the worker threads. + def weakref_cb(_, q=self._work_queue): + q.put(None) + # TODO(bquinlan): Should avoid creating new threads if there are more + # idle threads than items in the work queue. + if len(self._threads) < self._max_workers: + t = threading.Thread(target=_worker, + args=(weakref.ref(self, weakref_cb), + self._work_queue)) + t.daemon = True + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown = True + self._work_queue.put(None) + if wait: + for t in self._threads: + t.join() + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/third_party/pythonfutures/crawl.py b/third_party/pythonfutures/crawl.py new file mode 100755 index 00000000..86e0af7f --- /dev/null +++ b/third_party/pythonfutures/crawl.py @@ -0,0 +1,74 @@ +"""Compare the speed of downloading URLs sequentially vs. 
using futures.""" + +import functools +import time +import timeit +import sys + +try: + from urllib2 import urlopen +except ImportError: + from urllib.request import urlopen + +from concurrent.futures import (as_completed, ThreadPoolExecutor, + ProcessPoolExecutor) + +URLS = ['http://www.google.com/', + 'http://www.apple.com/', + 'http://www.ibm.com', + 'http://www.thisurlprobablydoesnotexist.com', + 'http://www.slashdot.org/', + 'http://www.python.org/', + 'http://www.bing.com/', + 'http://www.facebook.com/', + 'http://www.yahoo.com/', + 'http://www.youtube.com/', + 'http://www.blogger.com/'] + +def load_url(url, timeout): + kwargs = {'timeout': timeout} if sys.version_info >= (2, 6) else {} + return urlopen(url, **kwargs).read() + +def download_urls_sequential(urls, timeout=60): + url_to_content = {} + for url in urls: + try: + url_to_content[url] = load_url(url, timeout=timeout) + except: + pass + return url_to_content + +def download_urls_with_executor(urls, executor, timeout=60): + try: + url_to_content = {} + future_to_url = dict((executor.submit(load_url, url, timeout), url) + for url in urls) + + for future in as_completed(future_to_url): + try: + url_to_content[future_to_url[future]] = future.result() + except: + pass + return url_to_content + finally: + executor.shutdown() + +def main(): + for name, fn in [('sequential', + functools.partial(download_urls_sequential, URLS)), + ('processes', + functools.partial(download_urls_with_executor, + URLS, + ProcessPoolExecutor(10))), + ('threads', + functools.partial(download_urls_with_executor, + URLS, + ThreadPoolExecutor(10)))]: + sys.stdout.write('%s: ' % name.ljust(12)) + start = time.time() + url_map = fn() + sys.stdout.write('%.2f seconds (%d of %d downloaded)\n' % + (time.time() - start, len(url_map), len(URLS))) + +if __name__ == '__main__': + main() diff --git a/third_party/pythonfutures/docs/conf.py b/third_party/pythonfutures/docs/conf.py new file mode 100755 index 00000000..124cd518 --- /dev/null +++ b/third_party/pythonfutures/docs/conf.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +# +# futures documentation build configuration file, created by +# sphinx-quickstart on Wed Jun 3 19:35:34 2009. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.append(os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = u'futures' +copyright = u'2009-2011, Brian Quinlan' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '2.1.3' +# The full version, including alpha/beta/rc tags. +release = '2.1.3' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. 
+#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'futuresdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'futures.tex', u'futures Documentation', + u'Brian Quinlan', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True diff --git a/third_party/pythonfutures/docs/index.rst b/third_party/pythonfutures/docs/index.rst new file mode 100755 index 00000000..525ce6ab --- /dev/null +++ b/third_party/pythonfutures/docs/index.rst @@ -0,0 +1,345 @@ +:mod:`concurrent.futures` --- Asynchronous computation +====================================================== + +.. module:: concurrent.futures + :synopsis: Execute computations asynchronously using threads or processes. + +The :mod:`concurrent.futures` module provides a high-level interface for +asynchronously executing callables. + +The asynchronous execution can be be performed by threads using +:class:`ThreadPoolExecutor` or seperate processes using +:class:`ProcessPoolExecutor`. Both implement the same interface, which is +defined by the abstract :class:`Executor` class. + +Executor Objects +---------------- + +:class:`Executor` is an abstract class that provides methods to execute calls +asynchronously. It should not be used directly, but through its two +subclasses: :class:`ThreadPoolExecutor` and :class:`ProcessPoolExecutor`. + +.. method:: Executor.submit(fn, *args, **kwargs) + + Schedules the callable to be executed as *fn*(*\*args*, *\*\*kwargs*) and + returns a :class:`Future` representing the execution of the callable. + +:: + + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(pow, 323, 1235) + print(future.result()) + +.. method:: Executor.map(func, *iterables, timeout=None) + + Equivalent to map(*func*, *\*iterables*) but func is executed asynchronously + and several calls to *func* may be made concurrently. The returned iterator + raises a :exc:`TimeoutError` if :meth:`__next__()` is called and the result + isn't available after *timeout* seconds from the original call to + :meth:`map()`. *timeout* can be an int or float. If *timeout* is not + specified or ``None`` then there is no limit to the wait time. 
If a call + raises an exception then that exception will be raised when its value is + retrieved from the iterator. + +.. method:: Executor.shutdown(wait=True) + + Signal the executor that it should free any resources that it is using when + the currently pending futures are done executing. Calls to + :meth:`Executor.submit` and :meth:`Executor.map` made after shutdown will + raise :exc:`RuntimeError`. + + If *wait* is `True` then this method will not return until all the pending + futures are done executing and the resources associated with the executor + have been freed. If *wait* is `False` then this method will return + immediately and the resources associated with the executor will be freed + when all pending futures are done executing. Regardless of the value of + *wait*, the entire Python program will not exit until all pending futures + are done executing. + + You can avoid having to call this method explicitly if you use the `with` + statement, which will shutdown the `Executor` (waiting as if + `Executor.shutdown` were called with *wait* set to `True`): + +:: + + import shutil + with ThreadPoolExecutor(max_workers=4) as e: + e.submit(shutil.copy, 'src1.txt', 'dest1.txt') + e.submit(shutil.copy, 'src2.txt', 'dest2.txt') + e.submit(shutil.copy, 'src3.txt', 'dest3.txt') + e.submit(shutil.copy, 'src3.txt', 'dest4.txt') + + +ThreadPoolExecutor Objects +-------------------------- + +The :class:`ThreadPoolExecutor` class is an :class:`Executor` subclass that uses +a pool of threads to execute calls asynchronously. + +Deadlock can occur when the callable associated with a :class:`Future` waits on +the results of another :class:`Future`. For example: + +:: + + import time + def wait_on_b(): + time.sleep(5) + print(b.result()) # b will never complete because it is waiting on a. + return 5 + + def wait_on_a(): + time.sleep(5) + print(a.result()) # a will never complete because it is waiting on b. + return 6 + + + executor = ThreadPoolExecutor(max_workers=2) + a = executor.submit(wait_on_b) + b = executor.submit(wait_on_a) + +And: + +:: + + def wait_on_future(): + f = executor.submit(pow, 5, 2) + # This will never complete because there is only one worker thread and + # it is executing this function. + print(f.result()) + + executor = ThreadPoolExecutor(max_workers=1) + executor.submit(wait_on_future) + +.. class:: ThreadPoolExecutor(max_workers) + + Executes calls asynchronously using at pool of at most *max_workers* threads. + +.. _threadpoolexecutor-example: + +ThreadPoolExecutor Example +^^^^^^^^^^^^^^^^^^^^^^^^^^ +:: + + from concurrent import futures + import urllib.request + + URLS = ['http://www.foxnews.com/', + 'http://www.cnn.com/', + 'http://europe.wsj.com/', + 'http://www.bbc.co.uk/', + 'http://some-made-up-domain.com/'] + + def load_url(url, timeout): + return urllib.request.urlopen(url, timeout=timeout).read() + + with futures.ThreadPoolExecutor(max_workers=5) as executor: + future_to_url = dict((executor.submit(load_url, url, 60), url) + for url in URLS) + + for future in futures.as_completed(future_to_url): + url = future_to_url[future] + if future.exception() is not None: + print('%r generated an exception: %s' % (url, + future.exception())) + else: + print('%r page is %d bytes' % (url, len(future.result()))) + +ProcessPoolExecutor Objects +--------------------------- + +The :class:`ProcessPoolExecutor` class is an :class:`Executor` subclass that +uses a pool of processes to execute calls asynchronously. 
+:class:`ProcessPoolExecutor` uses the :mod:`multiprocessing` module, which
+allows it to side-step the :term:`Global Interpreter Lock` but also means that
+only picklable objects can be executed and returned.
+
+Calling :class:`Executor` or :class:`Future` methods from a callable submitted
+to a :class:`ProcessPoolExecutor` will result in deadlock.
+
+.. class:: ProcessPoolExecutor(max_workers=None)
+
+   Executes calls asynchronously using a pool of at most *max_workers*
+   processes. If *max_workers* is ``None`` or not given then as many worker
+   processes will be created as the machine has processors.
+
+.. _processpoolexecutor-example:
+
+ProcessPoolExecutor Example
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+::
+
+    import math
+
+    from concurrent import futures
+
+    PRIMES = [
+        112272535095293,
+        112582705942171,
+        112272535095293,
+        115280095190773,
+        115797848077099,
+        1099726899285419]
+
+    def is_prime(n):
+        # Handle small n explicitly so that 2 is reported as prime and
+        # values below 2 as non-prime.
+        if n < 2:
+            return False
+        if n == 2:
+            return True
+        if n % 2 == 0:
+            return False
+
+        sqrt_n = int(math.floor(math.sqrt(n)))
+        for i in range(3, sqrt_n + 1, 2):
+            if n % i == 0:
+                return False
+        return True
+
+    def main():
+        with futures.ProcessPoolExecutor() as executor:
+            for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
+                print('%d is prime: %s' % (number, prime))
+
+    if __name__ == '__main__':
+        main()
+
+Future Objects
+--------------
+
+The :class:`Future` class encapsulates the asynchronous execution of a callable.
+:class:`Future` instances are created by :meth:`Executor.submit`.
+
+.. method:: Future.cancel()
+
+   Attempt to cancel the call. If the call is currently being executed then
+   it cannot be cancelled and the method will return `False`, otherwise the call
+   will be cancelled and the method will return `True`.
+
+.. method:: Future.cancelled()
+
+   Return `True` if the call was successfully cancelled.
+
+.. method:: Future.running()
+
+   Return `True` if the call is currently being executed and cannot be
+   cancelled.
+
+.. method:: Future.done()
+
+   Return `True` if the call was successfully cancelled or finished running.
+
+.. method:: Future.result(timeout=None)
+
+   Return the value returned by the call. If the call hasn't yet completed then
+   this method will wait up to *timeout* seconds. If the call hasn't completed
+   in *timeout* seconds then a :exc:`TimeoutError` will be raised. *timeout* can
+   be an int or float. If *timeout* is not specified or ``None`` then there is
+   no limit to the wait time.
+
+   If the future is cancelled before completing then :exc:`CancelledError` will
+   be raised.
+
+   If the call raised an exception then this method will raise the same
+   exception.
+
+.. method:: Future.exception(timeout=None)
+
+   Return the exception raised by the call. If the call hasn't yet completed
+   then this method will wait up to *timeout* seconds. If the call hasn't
+   completed in *timeout* seconds then a :exc:`TimeoutError` will be raised.
+   *timeout* can be an int or float. If *timeout* is not specified or ``None``
+   then there is no limit to the wait time.
+
+   If the future is cancelled before completing then :exc:`CancelledError` will
+   be raised.
+
+   If the call completed without raising then ``None`` is returned.
+
+.. method:: Future.add_done_callback(fn)
+
+   Attaches the callable *fn* to the future. *fn* will be called, with the
+   future as its only argument, when the future is cancelled or finishes
+   running.
+
+   Added callables are called in the order that they were added and are always
+   called in a thread belonging to the process that added them. If the callable
+   raises an :exc:`Exception` then it will be logged and ignored. If the
+   callable raises another :exc:`BaseException` then the behavior is not
+   defined.
+
+   If the future has already completed or been cancelled then *fn* will be
+   called immediately.
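+
+   For illustration, a sketch of attaching a completion callback; the
+   ``report_result`` helper is made up for this example:
+
+::
+
+    def report_result(future):
+        # Called once the future is done; result() will not block here.
+        print('result was %r' % future.result())
+
+    with ThreadPoolExecutor(max_workers=1) as executor:
+        f = executor.submit(pow, 2, 10)
+        f.add_done_callback(report_result)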
+
+Internal Future Methods
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The following :class:`Future` methods are meant for use in unit tests and
+:class:`Executor` implementations.
+
+.. method:: Future.set_running_or_notify_cancel()
+
+   This method should only be called by :class:`Executor` implementations before
+   executing the work associated with the :class:`Future` and by unit tests.
+
+   If the method returns `False` then the :class:`Future` was cancelled, i.e.
+   :meth:`Future.cancel` was called and returned `True`. Any threads waiting
+   on the :class:`Future` completing (i.e. through :func:`as_completed` or
+   :func:`wait`) will be woken up.
+
+   If the method returns `True` then the :class:`Future` was not cancelled
+   and has been put in the running state, i.e. calls to
+   :meth:`Future.running` will return `True`.
+
+   This method can only be called once and cannot be called after
+   :meth:`Future.set_result` or :meth:`Future.set_exception` have been
+   called.
+
+.. method:: Future.set_result(result)
+
+   Sets the result of the work associated with the :class:`Future` to *result*.
+
+   This method should only be used by :class:`Executor` implementations and
+   unit tests.
+
+.. method:: Future.set_exception(exception)
+
+   Sets the result of the work associated with the :class:`Future` to the
+   :class:`Exception` *exception*.
+
+   This method should only be used by :class:`Executor` implementations and
+   unit tests.
+
+Module Functions
+----------------
+
+.. function:: wait(fs, timeout=None, return_when=ALL_COMPLETED)
+
+   Wait for the :class:`Future` instances (possibly created by different
+   :class:`Executor` instances) given by *fs* to complete. Returns a named
+   2-tuple of sets. The first set, named "done", contains the futures that
+   completed (finished or were cancelled) before the wait completed. The second
+   set, named "not_done", contains uncompleted futures.
+
+   *timeout* can be used to control the maximum number of seconds to wait before
+   returning. *timeout* can be an int or float. If *timeout* is not specified or
+   ``None`` then there is no limit to the wait time.
+
+   *return_when* indicates when this function should return. It must be one of
+   the following constants:
+
+   +-----------------------------+----------------------------------------+
+   | Constant                    | Description                            |
+   +=============================+========================================+
+   | :const:`FIRST_COMPLETED`    | The function will return when any      |
+   |                             | future finishes or is cancelled.       |
+   +-----------------------------+----------------------------------------+
+   | :const:`FIRST_EXCEPTION`    | The function will return when any      |
+   |                             | future finishes by raising an          |
+   |                             | exception. If no future raises an      |
+   |                             | exception then it is equivalent to     |
+   |                             | `ALL_COMPLETED`.                       |
+   +-----------------------------+----------------------------------------+
+   | :const:`ALL_COMPLETED`      | The function will return when all      |
+   |                             | futures finish or are cancelled.       |
+   +-----------------------------+----------------------------------------+
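+
+   For illustration, a sketch of splitting futures into done and pending
+   sets; it assumes ``ThreadPoolExecutor``, ``wait`` and ``ALL_COMPLETED``
+   have been imported from :mod:`concurrent.futures`, and the sleep times
+   are arbitrary:
+
+::
+
+    import time
+
+    with ThreadPoolExecutor(max_workers=2) as executor:
+        fs = [executor.submit(time.sleep, 0.1),
+              executor.submit(time.sleep, 5)]
+        done, not_done = wait(fs, timeout=1, return_when=ALL_COMPLETED)
+        # The short sleep finishes within the timeout; the long one does not.
+        print(len(done), len(not_done))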
+
+.. function:: as_completed(fs, timeout=None)
+
+   Returns an iterator over the :class:`Future` instances (possibly created
+   by different :class:`Executor` instances) given by *fs* that yields futures
+   as they complete (finished or were cancelled). Any futures that completed
+   before :func:`as_completed()` was called will be yielded first. The returned
+   iterator raises a :exc:`TimeoutError` if :meth:`__next__()` is called and
+   the result isn't available after *timeout* seconds from the original call
+   to :func:`as_completed()`. *timeout* can be an int or float. If *timeout*
+   is not specified or ``None`` then there is no limit to the wait time.
diff --git a/third_party/pythonfutures/docs/make.bat b/third_party/pythonfutures/docs/make.bat
new file mode 100755
index 00000000..3e8021b5
--- /dev/null
+++ b/third_party/pythonfutures/docs/make.bat
@@ -0,0 +1,112 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+set SPHINXBUILD=sphinx-build
+set ALLSPHINXOPTS=-d _build/doctrees %SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+    set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+    :help
+    echo.Please use `make ^<target^>` where ^<target^> is one of
+    echo.  html      to make standalone HTML files
+    echo.  dirhtml   to make HTML files named index.html in directories
+    echo.  pickle    to make pickle files
+    echo.  json      to make JSON files
+    echo.  htmlhelp  to make HTML files and a HTML help project
+    echo.  qthelp    to make HTML files and a qthelp project
+    echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+    echo.  changes   to make an overview over all changed/added/deprecated items
+    echo.  linkcheck to check all external links for integrity
+    echo.  doctest   to run all doctests embedded in the documentation if enabled
+    goto end
+)
+
+if "%1" == "clean" (
+    for /d %%i in (_build\*) do rmdir /q /s %%i
+    del /q /s _build\*
+    goto end
+)
+
+if "%1" == "html" (
+    %SPHINXBUILD% -b html %ALLSPHINXOPTS% _build/html
+    echo.
+    echo.Build finished. The HTML pages are in _build/html.
+    goto end
+)
+
+if "%1" == "dirhtml" (
+    %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% _build/dirhtml
+    echo.
+    echo.Build finished. The HTML pages are in _build/dirhtml.
+    goto end
+)
+
+if "%1" == "pickle" (
+    %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% _build/pickle
+    echo.
+    echo.Build finished; now you can process the pickle files.
+    goto end
+)
+
+if "%1" == "json" (
+    %SPHINXBUILD% -b json %ALLSPHINXOPTS% _build/json
+    echo.
+    echo.Build finished; now you can process the JSON files.
+    goto end
+)
+
+if "%1" == "htmlhelp" (
+    %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% _build/htmlhelp
+    echo.
+    echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in _build/htmlhelp.
+    goto end
+)
+
+if "%1" == "qthelp" (
+    %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% _build/qthelp
+    echo.
+    echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in _build/qthelp, like this:
+    echo.^> qcollectiongenerator _build\qthelp\futures.qhcp
+    echo.To view the help file:
+    echo.^> assistant -collectionFile _build\qthelp\futures.qhc
+    goto end
+)
+
+if "%1" == "latex" (
+    %SPHINXBUILD% -b latex %ALLSPHINXOPTS% _build/latex
+    echo.
+    echo.Build finished; the LaTeX files are in _build/latex.
+    goto end
+)
+
+if "%1" == "changes" (
+    %SPHINXBUILD% -b changes %ALLSPHINXOPTS% _build/changes
+    echo.
+    echo.The overview file is in _build/changes.
+    goto end
+)
+
+if "%1" == "linkcheck" (
+    %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% _build/linkcheck
+    echo.
+    echo.Link check complete; look for any errors in the above output ^
+or in _build/linkcheck/output.txt.
+    goto end
+)
+
+if "%1" == "doctest" (
+    %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% _build/doctest
+    echo.
+    echo.Testing of doctests in the sources finished, look at the ^
+results in _build/doctest/output.txt.
+    goto end
+)
+
+:end
diff --git a/third_party/pythonfutures/futures/__init__.py b/third_party/pythonfutures/futures/__init__.py
new file mode 100755
index 00000000..8f8b2348
--- /dev/null
+++ b/third_party/pythonfutures/futures/__init__.py
@@ -0,0 +1,24 @@
+# Copyright 2009 Brian Quinlan. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Execute computations asynchronously using threads or processes."""
+
+import warnings
+
+from concurrent.futures import (FIRST_COMPLETED,
+                                FIRST_EXCEPTION,
+                                ALL_COMPLETED,
+                                CancelledError,
+                                TimeoutError,
+                                Future,
+                                Executor,
+                                wait,
+                                as_completed,
+                                ProcessPoolExecutor,
+                                ThreadPoolExecutor)
+
+__author__ = 'Brian Quinlan (brian@sweetapp.com)'
+
+warnings.warn('The futures package has been deprecated. '
+              'Use the concurrent.futures package instead.',
+              DeprecationWarning)
diff --git a/third_party/pythonfutures/futures/process.py b/third_party/pythonfutures/futures/process.py
new file mode 100755
index 00000000..e9d37b16
--- /dev/null
+++ b/third_party/pythonfutures/futures/process.py
@@ -0,0 +1 @@
+from concurrent.futures import ProcessPoolExecutor
diff --git a/third_party/pythonfutures/futures/thread.py b/third_party/pythonfutures/futures/thread.py
new file mode 100755
index 00000000..f6bd05de
--- /dev/null
+++ b/third_party/pythonfutures/futures/thread.py
@@ -0,0 +1 @@
+from concurrent.futures import ThreadPoolExecutor
diff --git a/third_party/pythonfutures/primes.py b/third_party/pythonfutures/primes.py
new file mode 100755
index 00000000..0da2b3e6
--- /dev/null
+++ b/third_party/pythonfutures/primes.py
@@ -0,0 +1,50 @@
+from __future__ import with_statement
+import math
+import time
+import sys
+
+from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
+
+PRIMES = [
+    112272535095293,
+    112582705942171,
+    112272535095293,
+    115280095190773,
+    115797848077099,
+    117450548693743,
+    993960000099397]
+
+def is_prime(n):
+    # Handle small n explicitly so that 2 is reported as prime and values
+    # below 2 as non-prime.
+    if n < 2:
+        return False
+    if n == 2:
+        return True
+    if n % 2 == 0:
+        return False
+
+    sqrt_n = int(math.floor(math.sqrt(n)))
+    for i in range(3, sqrt_n + 1, 2):
+        if n % i == 0:
+            return False
+    return True
+
+def sequential():
+    return list(map(is_prime, PRIMES))
+
+def with_process_pool_executor():
+    with ProcessPoolExecutor(10) as executor:
+        return list(executor.map(is_prime, PRIMES))
+
+def with_thread_pool_executor():
+    with ThreadPoolExecutor(10) as executor:
+        return list(executor.map(is_prime, PRIMES))
+
+def main():
+    for name, fn in [('sequential', sequential),
+                     ('processes', with_process_pool_executor),
+                     ('threads', with_thread_pool_executor)]:
+        sys.stdout.write('%s: ' % name.ljust(12))
+        start = time.time()
+        if fn() != [True] * len(PRIMES):
+            sys.stdout.write('failed\n')
+        else:
+            sys.stdout.write('%.2f seconds\n' % (time.time() - start))
+
+if __name__ == '__main__':
+    main()
diff --git a/third_party/pythonfutures/setup.cfg b/third_party/pythonfutures/setup.cfg
new file mode 100755
index 00000000..0a9f4f52
--- /dev/null
+++ b/third_party/pythonfutures/setup.cfg
@@ -0,0 +1,6 @@
+[build_sphinx]
+source-dir = docs
+build-dir = build/sphinx
+
+[upload_docs]
+upload-dir = build/sphinx/html
diff --git a/third_party/pythonfutures/setup.py b/third_party/pythonfutures/setup.py
new file mode 100755
index 00000000..c08461ed
--- /dev/null
+++ b/third_party/pythonfutures/setup.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+import sys
+
+extras = {}
+try:
+    from setuptools import setup
+    extras['zip_safe'] = False
+    if sys.version_info < (2, 6):
+        extras['install_requires'] = ['multiprocessing']
+except ImportError:
+    from distutils.core import setup
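+# When setuptools is unavailable the plain distutils setup() is used and the
+# setuptools-only options collected in 'extras' stay empty.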
+
+setup(name='futures',
+      version='2.1.4',
+      description='Backport of the concurrent.futures package from Python 3.2',
+      author='Brian Quinlan',
+      author_email='brian@sweetapp.com',
+      maintainer='Alex Gronholm',
+      maintainer_email='alex.gronholm+pypi@nextday.fi',
+      url='http://code.google.com/p/pythonfutures',
+      download_url='http://pypi.python.org/pypi/futures/',
+      packages=['futures', 'concurrent', 'concurrent.futures'],
+      license='BSD',
+      classifiers=['License :: OSI Approved :: BSD License',
+                   'Development Status :: 5 - Production/Stable',
+                   'Intended Audience :: Developers',
+                   'Programming Language :: Python :: 2.5',
+                   'Programming Language :: Python :: 2.6',
+                   'Programming Language :: Python :: 2.7',
+                   'Programming Language :: Python :: 3',
+                   'Programming Language :: Python :: 3.1'],
+      **extras
+      )
diff --git a/third_party/pythonfutures/test_futures.py b/third_party/pythonfutures/test_futures.py
new file mode 100755
index 00000000..dd7fd3e6
--- /dev/null
+++ b/third_party/pythonfutures/test_futures.py
@@ -0,0 +1,723 @@
+from __future__ import with_statement
+import os
+import subprocess
+import sys
+import threading
+import functools
+import contextlib
+import logging
+import re
+import time
+
+from concurrent import futures
+from concurrent.futures._base import (
+    PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
+
+try:
+    import unittest2 as unittest
+except ImportError:
+    import unittest
+
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+try:
+    from test import test_support
+except ImportError:
+    from test import support as test_support
+
+try:
+    next
+except NameError:
+    next = lambda x: x.next()
+
+
+def reap_threads(func):
+    """Use this function when threads are being used. This will
+    ensure that the threads are cleaned up even when the test fails.
+    If threading is unavailable this function does nothing.
+    """
+    @functools.wraps(func)
+    def decorator(*args):
+        key = test_support.threading_setup()
+        try:
+            return func(*args)
+        finally:
+            test_support.threading_cleanup(*key)
+    return decorator
+
+
+# Executing the interpreter in a subprocess
+def _assert_python(expected_success, *args, **env_vars):
+    cmd_line = [sys.executable]
+    if not env_vars:
+        cmd_line.append('-E')
+    # Need to preserve the original environment, for in-place testing of
+    # shared library builds.
+    env = os.environ.copy()
+    # But a special flag that can be set to override -- in this case, the
+    # caller is responsible to pass the full environment.
+    if env_vars.pop('__cleanenv', None):
+        env = {}
+    env.update(env_vars)
+    cmd_line.extend(args)
+    p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
+                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                         env=env)
+    try:
+        out, err = p.communicate()
+    finally:
+        subprocess._cleanup()
+        p.stdout.close()
+        p.stderr.close()
+    rc = p.returncode
+    err = strip_python_stderr(err)
+    if (rc and expected_success) or (not rc and not expected_success):
+        raise AssertionError(
+            "Process return code is %d, "
+            "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
+    return rc, out, err
+
+
+def assert_python_ok(*args, **env_vars):
+    """
+    Assert that running the interpreter with `args` and optional environment
+    variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
+    """
+    return _assert_python(True, *args, **env_vars)
+
+
+def strip_python_stderr(stderr):
+    """Strip the stderr of a Python process from potential debug output
+    emitted by the interpreter.
+
+    This will typically be run on the result of the communicate() method
+    of a subprocess.Popen object.
+    """
+    stderr = re.sub(r"\[\d+ refs\]\r?\n?$".encode(), "".encode(), stderr).strip()
+    return stderr
+
+
+@contextlib.contextmanager
+def captured_stderr():
+    """Capture log output written to stderr.
+
+    Temporarily attaches a logging.StreamHandler backed by a StringIO to the
+    root logger and yields that StringIO, so tests can inspect anything the
+    futures package logs.
+    """
+    logging_stream = StringIO()
+    handler = logging.StreamHandler(logging_stream)
+    logging.root.addHandler(handler)
+
+    try:
+        yield logging_stream
+    finally:
+        logging.root.removeHandler(handler)
+
+
+def create_future(state=PENDING, exception=None, result=None):
+    f = Future()
+    f._state = state
+    f._exception = exception
+    f._result = result
+    return f
+
+
+PENDING_FUTURE = create_future(state=PENDING)
+RUNNING_FUTURE = create_future(state=RUNNING)
+CANCELLED_FUTURE = create_future(state=CANCELLED)
+CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
+EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError())
+SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
+
+
+def mul(x, y):
+    return x * y
+
+
+def sleep_and_raise(t):
+    time.sleep(t)
+    raise Exception('this is an exception')
+
+def sleep_and_print(t, msg):
+    time.sleep(t)
+    print(msg)
+    sys.stdout.flush()
+
+
+class ExecutorMixin:
+    worker_count = 5
+
+    def setUp(self):
+        self.t1 = time.time()
+        try:
+            self.executor = self.executor_type(max_workers=self.worker_count)
+        except NotImplementedError:
+            e = sys.exc_info()[1]
+            self.skipTest(str(e))
+        self._prime_executor()
+
+    def tearDown(self):
+        self.executor.shutdown(wait=True)
+        dt = time.time() - self.t1
+        if test_support.verbose:
+            print("%.2fs" % dt)
+        self.assertLess(dt, 60, "synchronization issue: test lasted too long")
+
+    def _prime_executor(self):
+        # Make sure that the executor is ready to do work before running the
+        # tests. This should reduce the probability of timeouts in the tests.
+        futures = [self.executor.submit(time.sleep, 0.1)
+                   for _ in range(self.worker_count)]
+
+        for f in futures:
+            f.result()
+
+
+class ThreadPoolMixin(ExecutorMixin):
+    executor_type = futures.ThreadPoolExecutor
+
+
+class ProcessPoolMixin(ExecutorMixin):
+    executor_type = futures.ProcessPoolExecutor
+
+
+class ExecutorShutdownTest(unittest.TestCase):
+    def test_run_after_shutdown(self):
+        self.executor.shutdown()
+        self.assertRaises(RuntimeError,
+                          self.executor.submit,
+                          pow, 2, 5)
+
+    def test_interpreter_shutdown(self):
+        # Test the atexit hook for shutdown of worker threads and processes
+        rc, out, err = assert_python_ok('-c', """if 1:
+            from concurrent.futures import %s
+            from time import sleep
+            from test_futures import sleep_and_print
+            t = %s(5)
+            t.submit(sleep_and_print, 1.0, "apple")
+            """ % (self.executor_type.__name__, self.executor_type.__name__))
+        # Errors in atexit hooks don't change the process exit code, check
+        # stderr manually.
+        self.assertFalse(err)
+        self.assertEqual(out.strip(), "apple".encode())
+
+    def test_hang_issue12364(self):
+        fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
+        self.executor.shutdown()
+        for f in fs:
+            f.result()
+
+
+class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest):
+    def _prime_executor(self):
+        pass
+
+    def test_threads_terminate(self):
+        self.executor.submit(mul, 21, 2)
+        self.executor.submit(mul, 6, 7)
+        self.executor.submit(mul, 3, 14)
+        self.assertEqual(len(self.executor._threads), 3)
+        self.executor.shutdown()
+        for t in self.executor._threads:
+            t.join()
+
+    def test_context_manager_shutdown(self):
+        with futures.ThreadPoolExecutor(max_workers=5) as e:
+            executor = e
+            self.assertEqual(list(e.map(abs, range(-5, 5))),
+                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
+
+        for t in executor._threads:
+            t.join()
+
+    def test_del_shutdown(self):
+        executor = futures.ThreadPoolExecutor(max_workers=5)
+        executor.map(abs, range(-5, 5))
+        threads = executor._threads
+        del executor
+
+        for t in threads:
+            t.join()
+
+
+class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
+    def _prime_executor(self):
+        pass
+
+    def test_processes_terminate(self):
+        self.executor.submit(mul, 21, 2)
+        self.executor.submit(mul, 6, 7)
+        self.executor.submit(mul, 3, 14)
+        self.assertEqual(len(self.executor._processes), 5)
+        processes = self.executor._processes
+        self.executor.shutdown()
+
+        for p in processes:
+            p.join()
+
+    def test_context_manager_shutdown(self):
+        with futures.ProcessPoolExecutor(max_workers=5) as e:
+            processes = e._processes
+            self.assertEqual(list(e.map(abs, range(-5, 5))),
+                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
+
+        for p in processes:
+            p.join()
+
+    def test_del_shutdown(self):
+        executor = futures.ProcessPoolExecutor(max_workers=5)
+        list(executor.map(abs, range(-5, 5)))
+        queue_management_thread = executor._queue_management_thread
+        processes = executor._processes
+        del executor
+
+        queue_management_thread.join()
+        for p in processes:
+            p.join()
+
+
+class WaitTests(unittest.TestCase):
+
+    def test_first_completed(self):
+        future1 = self.executor.submit(mul, 21, 2)
+        future2 = self.executor.submit(time.sleep, 1.5)
+
+        done, not_done = futures.wait(
+                [CANCELLED_FUTURE, future1, future2],
+                return_when=futures.FIRST_COMPLETED)
+
+        self.assertEqual(set([future1]), done)
+        self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
+
+    def test_first_completed_some_already_completed(self):
+        future1 = self.executor.submit(time.sleep, 1.5)
+
+        finished, pending = futures.wait(
+                [CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
+                return_when=futures.FIRST_COMPLETED)
+
+        self.assertEqual(
+                set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
+                finished)
+        self.assertEqual(set([future1]), pending)
+
+    def test_first_exception(self):
+        future1 = self.executor.submit(mul, 2, 21)
+        future2 = self.executor.submit(sleep_and_raise, 1.5)
+        future3 = self.executor.submit(time.sleep, 3)
+
+        finished, pending = futures.wait(
+                [future1, future2, future3],
+                return_when=futures.FIRST_EXCEPTION)
+
+        self.assertEqual(set([future1, future2]), finished)
+        self.assertEqual(set([future3]), pending)
+
+    def test_first_exception_some_already_complete(self):
+        future1 = self.executor.submit(divmod, 21, 0)
+        future2 = self.executor.submit(time.sleep, 1.5)
+
+        finished, pending = futures.wait(
+                [SUCCESSFUL_FUTURE,
+                 CANCELLED_FUTURE,
+                 CANCELLED_AND_NOTIFIED_FUTURE,
+                 future1, future2],
+                return_when=futures.FIRST_EXCEPTION)
+
+        self.assertEqual(set([SUCCESSFUL_FUTURE,
+                              CANCELLED_AND_NOTIFIED_FUTURE,
+                              future1]), finished)
+        self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
+
+    def test_first_exception_one_already_failed(self):
+        future1 = self.executor.submit(time.sleep, 2)
+
+        finished, pending = futures.wait(
+                [EXCEPTION_FUTURE, future1],
+                return_when=futures.FIRST_EXCEPTION)
+
+        self.assertEqual(set([EXCEPTION_FUTURE]), finished)
+        self.assertEqual(set([future1]), pending)
+
+    def test_all_completed(self):
+        future1 = self.executor.submit(divmod, 2, 0)
+        future2 = self.executor.submit(mul, 2, 21)
+
+        finished, pending = futures.wait(
+                [SUCCESSFUL_FUTURE,
+                 CANCELLED_AND_NOTIFIED_FUTURE,
+                 EXCEPTION_FUTURE,
+                 future1,
+                 future2],
+                return_when=futures.ALL_COMPLETED)
+
+        self.assertEqual(set([SUCCESSFUL_FUTURE,
+                              CANCELLED_AND_NOTIFIED_FUTURE,
+                              EXCEPTION_FUTURE,
+                              future1,
+                              future2]), finished)
+        self.assertEqual(set(), pending)
+
+    def test_timeout(self):
+        future1 = self.executor.submit(mul, 6, 7)
+        future2 = self.executor.submit(time.sleep, 3)
+
+        finished, pending = futures.wait(
+                [CANCELLED_AND_NOTIFIED_FUTURE,
+                 EXCEPTION_FUTURE,
+                 SUCCESSFUL_FUTURE,
+                 future1, future2],
+                timeout=1.5,
+                return_when=futures.ALL_COMPLETED)
+
+        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
+                              EXCEPTION_FUTURE,
+                              SUCCESSFUL_FUTURE,
+                              future1]), finished)
+        self.assertEqual(set([future2]), pending)
+
+
+class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
+
+    def test_pending_calls_race(self):
+        # Issue #14406: multi-threaded race condition when waiting on all
+        # futures.
+        event = threading.Event()
+        def future_func():
+            event.wait()
+        oldswitchinterval = sys.getcheckinterval()
+        sys.setcheckinterval(1)
+        try:
+            fs = set(self.executor.submit(future_func) for i in range(100))
+            event.set()
+            futures.wait(fs, return_when=futures.ALL_COMPLETED)
+        finally:
+            sys.setcheckinterval(oldswitchinterval)
+
+
+class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
+    pass
+
+
+class AsCompletedTests(unittest.TestCase):
+    # TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
+    def test_no_timeout(self):
+        future1 = self.executor.submit(mul, 2, 21)
+        future2 = self.executor.submit(mul, 7, 6)
+
+        completed = set(futures.as_completed(
+                [CANCELLED_AND_NOTIFIED_FUTURE,
+                 EXCEPTION_FUTURE,
+                 SUCCESSFUL_FUTURE,
+                 future1, future2]))
+        self.assertEqual(set(
+                [CANCELLED_AND_NOTIFIED_FUTURE,
+                 EXCEPTION_FUTURE,
+                 SUCCESSFUL_FUTURE,
+                 future1, future2]),
+                completed)
+
+    def test_zero_timeout(self):
+        future1 = self.executor.submit(time.sleep, 2)
+        completed_futures = set()
+        try:
+            for future in futures.as_completed(
+                    [CANCELLED_AND_NOTIFIED_FUTURE,
+                     EXCEPTION_FUTURE,
+                     SUCCESSFUL_FUTURE,
+                     future1],
+                    timeout=0):
+                completed_futures.add(future)
+        except futures.TimeoutError:
+            pass
+
+        self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
+                              EXCEPTION_FUTURE,
+                              SUCCESSFUL_FUTURE]),
+                         completed_futures)
+
+
+class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests):
+    pass
+
+
+class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests):
+    pass
+
+
+class ExecutorTest(unittest.TestCase):
+    # Executor.shutdown() and context manager usage is tested by
+    # ExecutorShutdownTest.
+    def test_submit(self):
+        future = self.executor.submit(pow, 2, 8)
+        self.assertEqual(256, future.result())
+
+    def test_submit_keyword(self):
+        future = self.executor.submit(mul, 2, y=8)
+        self.assertEqual(16, future.result())
+
+    def test_map(self):
+        self.assertEqual(
+                list(self.executor.map(pow, range(10), range(10))),
+                list(map(pow, range(10), range(10))))
+
+    def test_map_exception(self):
+        i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
+        self.assertEqual(next(i), (0, 1))
+        self.assertEqual(next(i), (0, 1))
+        self.assertRaises(ZeroDivisionError, next, i)
+
+    def test_map_timeout(self):
+        results = []
+        try:
+            for i in self.executor.map(time.sleep,
+                                       [0, 0, 3],
+                                       timeout=1.5):
+                results.append(i)
+        except futures.TimeoutError:
+            pass
+        else:
+            self.fail('expected TimeoutError')
+
+        self.assertEqual([None, None], results)
+
+
+class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
+    pass
+
+
+class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
+    pass
+
+
+class FutureTests(unittest.TestCase):
+    def test_done_callback_with_result(self):
+        callback_result = [None]
+        def fn(callback_future):
+            callback_result[0] = callback_future.result()
+
+        f = Future()
+        f.add_done_callback(fn)
+        f.set_result(5)
+        self.assertEqual(5, callback_result[0])
+
+    def test_done_callback_with_exception(self):
+        callback_exception = [None]
+        def fn(callback_future):
+            callback_exception[0] = callback_future.exception()
+
+        f = Future()
+        f.add_done_callback(fn)
+        f.set_exception(Exception('test'))
+        self.assertEqual(('test',), callback_exception[0].args)
+
+    def test_done_callback_with_cancel(self):
+        was_cancelled = [None]
+        def fn(callback_future):
+            was_cancelled[0] = callback_future.cancelled()
+
+        f = Future()
+        f.add_done_callback(fn)
+        self.assertTrue(f.cancel())
+        self.assertTrue(was_cancelled[0])
+
+    def test_done_callback_raises(self):
+        with captured_stderr() as stderr:
+            raising_was_called = [False]
+            fn_was_called = [False]
+
+            def raising_fn(callback_future):
+                raising_was_called[0] = True
+                raise Exception('doh!')
+
+            def fn(callback_future):
+                fn_was_called[0] = True
+
+            f = Future()
+            f.add_done_callback(raising_fn)
+            f.add_done_callback(fn)
+            f.set_result(5)
+            self.assertTrue(raising_was_called)
+            self.assertTrue(fn_was_called)
+            self.assertIn('Exception: doh!', stderr.getvalue())
+
+    def test_done_callback_already_successful(self):
+        callback_result = [None]
+        def fn(callback_future):
+            callback_result[0] = callback_future.result()
+
+        f = Future()
+        f.set_result(5)
+        f.add_done_callback(fn)
+        self.assertEqual(5, callback_result[0])
+
+    def test_done_callback_already_failed(self):
+        callback_exception = [None]
+        def fn(callback_future):
+            callback_exception[0] = callback_future.exception()
+
+        f = Future()
+        f.set_exception(Exception('test'))
+        f.add_done_callback(fn)
+        self.assertEqual(('test',), callback_exception[0].args)
+
+    def test_done_callback_already_cancelled(self):
+        was_cancelled = [None]
+        def fn(callback_future):
+            was_cancelled[0] = callback_future.cancelled()
+
+        f = Future()
+        self.assertTrue(f.cancel())
+        f.add_done_callback(fn)
+        self.assertTrue(was_cancelled[0])
+
+    def test_repr(self):
+        # The expected patterns follow Future.__repr__ in
+        # concurrent/futures/_base.py.
+        self.assertRegexpMatches(repr(PENDING_FUTURE),
+                                 '<Future at 0x[0-9a-f]+ state=pending>')
+        self.assertRegexpMatches(repr(RUNNING_FUTURE),
+                                 '<Future at 0x[0-9a-f]+ state=running>')
+        self.assertRegexpMatches(repr(CANCELLED_FUTURE),
+                                 '<Future at 0x[0-9a-f]+ state=cancelled>')
+        self.assertRegexpMatches(repr(CANCELLED_AND_NOTIFIED_FUTURE),
+                                 '<Future at 0x[0-9a-f]+ state=cancelled>')
+        self.assertRegexpMatches(
+                repr(EXCEPTION_FUTURE),
+                '<Future at 0x[0-9a-f]+ state=finished raised IOError>')
+        self.assertRegexpMatches(
+                repr(SUCCESSFUL_FUTURE),
+                '<Future at 0x[0-9a-f]+ state=finished returned int>')
+
+    def test_cancel(self):
+        f1 = create_future(state=PENDING)
+        f2 = create_future(state=RUNNING)
+        f3 = create_future(state=CANCELLED)
+        f4 = create_future(state=CANCELLED_AND_NOTIFIED)
+        f5 = create_future(state=FINISHED, exception=IOError())
+        f6 = create_future(state=FINISHED, result=5)
+
+        self.assertTrue(f1.cancel())
+        self.assertEqual(f1._state, CANCELLED)
+
+        self.assertFalse(f2.cancel())
+        self.assertEqual(f2._state, RUNNING)
+
+        self.assertTrue(f3.cancel())
+        self.assertEqual(f3._state, CANCELLED)
+
+        self.assertTrue(f4.cancel())
+        self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
+
+        self.assertFalse(f5.cancel())
+        self.assertEqual(f5._state, FINISHED)
+
+        self.assertFalse(f6.cancel())
+        self.assertEqual(f6._state, FINISHED)
+
+    def test_cancelled(self):
+        self.assertFalse(PENDING_FUTURE.cancelled())
+        self.assertFalse(RUNNING_FUTURE.cancelled())
+        self.assertTrue(CANCELLED_FUTURE.cancelled())
+        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
+        self.assertFalse(EXCEPTION_FUTURE.cancelled())
+        self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
+
+    def test_done(self):
+        self.assertFalse(PENDING_FUTURE.done())
+        self.assertFalse(RUNNING_FUTURE.done())
+        self.assertTrue(CANCELLED_FUTURE.done())
+        self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
+        self.assertTrue(EXCEPTION_FUTURE.done())
+        self.assertTrue(SUCCESSFUL_FUTURE.done())
+
+    def test_running(self):
+        self.assertFalse(PENDING_FUTURE.running())
+        self.assertTrue(RUNNING_FUTURE.running())
+        self.assertFalse(CANCELLED_FUTURE.running())
+        self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
+        self.assertFalse(EXCEPTION_FUTURE.running())
+        self.assertFalse(SUCCESSFUL_FUTURE.running())
+
+    def test_result_with_timeout(self):
+        self.assertRaises(futures.TimeoutError,
+                          PENDING_FUTURE.result, timeout=0)
+        self.assertRaises(futures.TimeoutError,
+                          RUNNING_FUTURE.result, timeout=0)
+        self.assertRaises(futures.CancelledError,
+                          CANCELLED_FUTURE.result, timeout=0)
+        self.assertRaises(futures.CancelledError,
+                          CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
+        self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0)
+        self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
+
+    def test_result_with_success(self):
+        # TODO(brian@sweetapp.com): This test is timing dependent.
+        def notification():
+            # Wait until the main thread is waiting for the result.
+            time.sleep(1)
+            f1.set_result(42)
+
+        f1 = create_future(state=PENDING)
+        t = threading.Thread(target=notification)
+        t.start()
+
+        self.assertEqual(f1.result(timeout=5), 42)
+
+    def test_result_with_cancel(self):
+        # TODO(brian@sweetapp.com): This test is timing dependent.
+        def notification():
+            # Wait until the main thread is waiting for the result.
+            time.sleep(1)
+            f1.cancel()
+
+        f1 = create_future(state=PENDING)
+        t = threading.Thread(target=notification)
+        t.start()
+
+        self.assertRaises(futures.CancelledError, f1.result, timeout=5)
+
+    def test_exception_with_timeout(self):
+        self.assertRaises(futures.TimeoutError,
+                          PENDING_FUTURE.exception, timeout=0)
+        self.assertRaises(futures.TimeoutError,
+                          RUNNING_FUTURE.exception, timeout=0)
+        self.assertRaises(futures.CancelledError,
+                          CANCELLED_FUTURE.exception, timeout=0)
+        self.assertRaises(futures.CancelledError,
+                          CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
+        self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
+                                   IOError))
+        self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
+
+    def test_exception_with_success(self):
+        def notification():
+            # Wait until the main thread is waiting for the exception.
+            time.sleep(1)
+            with f1._condition:
+                f1._state = FINISHED
+                f1._exception = IOError()
+                f1._condition.notify_all()
+
+        f1 = create_future(state=PENDING)
+        t = threading.Thread(target=notification)
+        t.start()
+
+        self.assertTrue(isinstance(f1.exception(timeout=5), IOError))
+
+@reap_threads
+def test_main():
+    try:
+        test_support.run_unittest(ProcessPoolExecutorTest,
+                                  ThreadPoolExecutorTest,
+                                  ProcessPoolWaitTests,
+                                  ThreadPoolWaitTests,
+                                  ProcessPoolAsCompletedTests,
+                                  ThreadPoolAsCompletedTests,
+                                  FutureTests,
+                                  ProcessPoolShutdownTest,
+                                  ThreadPoolShutdownTest)
+    finally:
+        test_support.reap_children()
+
+if __name__ == "__main__":
+    test_main()
diff --git a/third_party/pythonfutures/tox.ini b/third_party/pythonfutures/tox.ini
new file mode 100755
index 00000000..c1ff2f13
--- /dev/null
+++ b/third_party/pythonfutures/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+envlist = py26,py27,py31
+
+[testenv]
+commands={envpython} test_futures.py []
+
+[testenv:py26]
+deps=unittest2