diff --git a/deployment/ansible.cfg b/deployment/ansible.cfg
index fa414647140b6074cc3e534e53e220c3f33362a2..7ed4eebbc4c217ad481d0db48be836c7d33296b2 100644
--- a/deployment/ansible.cfg
+++ b/deployment/ansible.cfg
@@ -9,5 +9,10 @@ filter_plugins =  ./filter_plugins/
 # This is 2 hours
 fact_caching_timeout = 7200
 
+# Mitogen
+host_key_checking = False
+strategy_plugins = ./lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy
+strategy = mitogen_linear
+
 [ssh_connection]
 pipelining = True
diff --git a/deployment/lib/mitogen-0.2.9/LICENSE b/deployment/lib/mitogen-0.2.9/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..70e43a944447a2d069fa9637357344e15e7224b8
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2019, David Wilson
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deployment/lib/mitogen-0.2.9/MANIFEST.in b/deployment/lib/mitogen-0.2.9/MANIFEST.in
new file mode 100644
index 0000000000000000000000000000000000000000..1aba38f67a2211cf5b09466d7b411206cb7223bf
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/MANIFEST.in
@@ -0,0 +1 @@
+include LICENSE
diff --git a/deployment/lib/mitogen-0.2.9/PKG-INFO b/deployment/lib/mitogen-0.2.9/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..3346b33426457f1c755a46348d38edaa5333d687
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/PKG-INFO
@@ -0,0 +1,23 @@
+Metadata-Version: 1.1
+Name: mitogen
+Version: 0.2.9
+Summary: Library for writing distributed self-replicating programs.
+Home-page: https://github.com/dw/mitogen/
+Author: David Wilson
+Author-email: UNKNOWN
+License: New BSD
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Environment :: Console
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: System :: Distributed Computing
+Classifier: Topic :: System :: Systems Administration
diff --git a/deployment/lib/mitogen-0.2.9/README.md b/deployment/lib/mitogen-0.2.9/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..da93a80b5b00f537fe36dd2eecd33ef79ef62862
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/README.md
@@ -0,0 +1,13 @@
+
+# Mitogen
+
+<!-- [![Build Status](https://travis-ci.org/dw/mitogen.png?branch=master)](https://travis-ci.org/dw/mitogen) -->
+<a href="https://mitogen.networkgenomics.com/">Please see the documentation</a>.
+
+![](https://i.imgur.com/eBM6LhJ.gif)
+
+[![Total alerts](https://img.shields.io/lgtm/alerts/g/dw/mitogen.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/dw/mitogen/alerts/)
+
+[![Build Status](https://travis-ci.org/dw/mitogen.svg?branch=master)](https://travis-ci.org/dw/mitogen)
+
+[![Pipelines Status](https://dev.azure.com/dw-mitogen/Mitogen/_apis/build/status/dw.mitogen?branchName=master)](https://dev.azure.com/dw-mitogen/Mitogen/_build/latest?definitionId=1&branchName=master)
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/__init__.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/affinity.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/affinity.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f4c8db568348d3a095dbe8f5cd3d8708565efb1
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/affinity.py
@@ -0,0 +1,286 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""
+As Mitogen separates asynchronous IO out to a broker thread, communication
+necessarily involves context switching and waking that thread. When application
+threads and the broker share a CPU, this can be almost invisibly fast - around
+25 microseconds for a full A->B->A round-trip.
+
+However when threads are scheduled on different CPUs, round-trip delays
+regularly vary wildly, and easily into milliseconds. Many contributing factors
+exist, not least scenarios like:
+
+1. A is preempted immediately after waking B, but before releasing the GIL.
+2. B wakes from IO wait only to immediately enter futex wait.
+3. A may wait 10ms or more for another timeslice, as the scheduler on its CPU
+   runs threads unrelated to its transaction (i.e. not B), wake only to release
+   its GIL, before entering IO sleep waiting for a reply from B, which cannot
+   exist yet.
+4. B wakes, acquires GIL, performs work, and sends reply to A, causing it to
+   wake. B is preempted before releasing GIL.
+5. A wakes from IO wait only to immediately enter futex wait.
+6. B may wait 10ms or more for another timeslice, wake only to release its GIL,
+   before sleeping again.
+7. A wakes, acquires GIL, finally receives reply.
+
+Per above if we are unlucky, on an even moderately busy machine it is possible
+to lose milliseconds just in scheduling delay, and the effect is compounded
+when pairs of threads in process A are communicating with pairs of threads in
+process B using the same scheme, such as when Ansible WorkerProcess is
+communicating with ContextService in the connection multiplexer. In the worst
+case it could involve 4 threads working in lockstep spread across 4 busy CPUs.
+
+Since multithreading in Python is essentially useless except for waiting on IO
+due to the presence of the GIL, at least in Ansible there is no good reason for
+threads in the same process to run on distinct CPUs - they always operate in
+lockstep due to the GIL, and are thus vulnerable to issues like above.
+
+Linux lacks any natural API to describe what we want, it only permits
+individual threads to be constrained to run on specific CPUs, and for that
+constraint to be inherited by new threads and forks of the constrained thread.
+
+This module therefore implements a CPU pinning policy for Ansible processes,
+providing methods that should be called early in any new process, either to
+rebalance which CPU it is pinned to, or in the case of subprocesses, to remove
+the pinning entirely. It is likely to require ongoing tweaking, since pinning
+necessarily involves preventing the scheduler from making load balancing
+decisions.
+"""
+
+from __future__ import absolute_import
+import ctypes
+import logging
+import mmap
+import multiprocessing
+import os
+import struct
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+
+try:
+    _libc = ctypes.CDLL(None, use_errno=True)
+    _strerror = _libc.strerror
+    _strerror.restype = ctypes.c_char_p
+    _sem_init = _libc.sem_init
+    _sem_wait = _libc.sem_wait
+    _sem_post = _libc.sem_post
+    _sched_setaffinity = _libc.sched_setaffinity
+except (OSError, AttributeError):
+    _libc = None
+    _strerror = None
+    _sem_init = None
+    _sem_wait = None
+    _sem_post = None
+    _sched_setaffinity = None
+
+
+class sem_t(ctypes.Structure):
+    """
+    Wrap sem_t to allow storing a lock in shared memory.
+    """
+    _fields_ = [
+        ('data', ctypes.c_uint8 * 128),
+    ]
+
+    def init(self):
+        if _sem_init(self.data, 1, 1):
+            raise Exception(_strerror(ctypes.get_errno()))
+
+    def acquire(self):
+        if _sem_wait(self.data):
+            raise Exception(_strerror(ctypes.get_errno()))
+
+    def release(self):
+        if _sem_post(self.data):
+            raise Exception(_strerror(ctypes.get_errno()))
+
+
+class State(ctypes.Structure):
+    """
+    Contents of shared memory segment. This allows :meth:`Manager.assign` to be
+    called from any child, since affinity assignment must happen from within
+    the context of the new child process.
+    """
+    _fields_ = [
+        ('lock', sem_t),
+        ('counter', ctypes.c_uint8),
+    ]
+
+
+class Policy(object):
+    """
+    Process affinity policy.
+    """
+    def assign_controller(self):
+        """
+        Assign the Ansible top-level policy to this process.
+        """
+
+    def assign_muxprocess(self, index):
+        """
+        Assign the MuxProcess policy to this process.
+        """
+
+    def assign_worker(self):
+        """
+        Assign the WorkerProcess policy to this process.
+        """
+
+    def assign_subprocess(self):
+        """
+        Assign the helper subprocess policy to this process.
+        """
+
+class FixedPolicy(Policy):
+    """
+    :class:`Policy` for machines where the only control method available is
+    fixed CPU placement. The scheme here was tested on an otherwise idle 16
+    thread machine.
+
+    - The connection multiplexer is pinned to CPU 0.
+    - The Ansible top-level (strategy) is pinned to CPU 1.
+- WorkerProcesses are pinned sequentially to 2..N, wrapping around when no
+      more CPUs exist.
+    - Children such as SSH may be scheduled on any CPU except 0/1.
+
+    If the machine has fewer than 4 cores available, the top-level and workers
+    are pinned between CPU 2..N, i.e. no CPU is reserved for the top-level
+    process.
+
+    This could at least be improved by having workers pinned to independent
+    cores, before reusing the second hyperthread of an existing core.
+
+    A hook is installed that causes :meth:`reset` to run in the child of any
+    process created with :func:`mitogen.parent.popen`, ensuring CPU-intensive
+    children like SSH are not forced to share the same core as the (otherwise
+    potentially very busy) parent.
+    """
+    def __init__(self, cpu_count=None):
+        #: For tests.
+        self.cpu_count = cpu_count or multiprocessing.cpu_count()
+        self.mem = mmap.mmap(-1, 4096)
+        self.state = State.from_buffer(self.mem)
+        self.state.lock.init()
+
+        if self.cpu_count < 2:
+            # uniprocessor
+            self._reserve_mux = False
+            self._reserve_controller = False
+            self._reserve_mask = 0
+            self._reserve_shift = 0
+        elif self.cpu_count < 4:
+            # small SMP
+            self._reserve_mux = True
+            self._reserve_controller = False
+            self._reserve_mask = 1
+            self._reserve_shift = 1
+        else:
+            # big SMP
+            self._reserve_mux = True
+            self._reserve_controller = True
+            self._reserve_mask = 3
+            self._reserve_shift = 2
+
+    def _set_affinity(self, descr, mask):
+        if descr:
+            LOG.debug('CPU mask for %s: %#08x', descr, mask)
+        mitogen.parent._preexec_hook = self._clear
+        self._set_cpu_mask(mask)
+
+    def _balance(self, descr):
+        self.state.lock.acquire()
+        try:
+            n = self.state.counter
+            self.state.counter += 1
+        finally:
+            self.state.lock.release()
+
+        self._set_cpu(descr, self._reserve_shift + (
+            (n % (self.cpu_count - self._reserve_shift))
+        ))
+
+    def _set_cpu(self, descr, cpu):
+        self._set_affinity(descr, 1 << (cpu % self.cpu_count))
+
+    def _clear(self):
+        all_cpus = (1 << self.cpu_count) - 1
+        self._set_affinity(None, all_cpus & ~self._reserve_mask)
+
+    def assign_controller(self):
+        if self._reserve_controller:
+            self._set_cpu('Ansible top-level process', 1)
+        else:
+            self._balance('Ansible top-level process')
+
+    def assign_muxprocess(self, index):
+        self._set_cpu('MuxProcess %d' % (index,), index)
+
+    def assign_worker(self):
+        self._balance('WorkerProcess')
+
+    def assign_subprocess(self):
+        self._clear()
+
+
+class LinuxPolicy(FixedPolicy):
+    def _mask_to_bytes(self, mask):
+        """
+        Convert the (type long) mask to a cpu_set_t.
+        """
+        chunks = []
+        shiftmask = (2 ** 64) - 1
+        for x in range(16):
+            chunks.append(struct.pack('<Q', mask & shiftmask))
+            mask >>= 64
+        return mitogen.core.b('').join(chunks)
+
+    def _get_thread_ids(self):
+        try:
+            ents = os.listdir('/proc/self/task')
+        except OSError:
+            LOG.debug('cannot fetch thread IDs for current process')
+            return [os.getpid()]
+
+        return [int(s) for s in ents if s.isdigit()]
+
+    def _set_cpu_mask(self, mask):
+        s = self._mask_to_bytes(mask)
+        for tid in self._get_thread_ids():
+            _sched_setaffinity(tid, len(s), s)
+
+
+if _sched_setaffinity is not None:
+    policy = LinuxPolicy()
+else:
+    policy = Policy()
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/__init__.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/__init__.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5b4d39913770fad1b415caf6cc08f08a5d38bac
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/__init__.py
@@ -0,0 +1,318 @@
+r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
+JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
+interchange format.
+
+:mod:`simplejson` exposes an API familiar to users of the standard library
+:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
+version of the :mod:`json` library contained in Python 2.6, but maintains
+compatibility with Python 2.4 and Python 2.5 and (currently) has
+significant performance advantages, even without using the optional C
+extension for speedups.
+
+Encoding basic Python object hierarchies::
+
+    >>> import simplejson as json
+    >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+    >>> print json.dumps("\"foo\bar")
+    "\"foo\bar"
+    >>> print json.dumps(u'\u1234')
+    "\u1234"
+    >>> print json.dumps('\\')
+    "\\"
+    >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
+    {"a": 0, "b": 0, "c": 0}
+    >>> from StringIO import StringIO
+    >>> io = StringIO()
+    >>> json.dump(['streaming API'], io)
+    >>> io.getvalue()
+    '["streaming API"]'
+
+Compact encoding::
+
+    >>> import simplejson as json
+    >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
+    '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+    >>> import simplejson as json
+    >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
+    >>> print '\n'.join([l.rstrip() for l in  s.splitlines()])
+    {
+        "4": 5,
+        "6": 7
+    }
+
+Decoding JSON::
+
+    >>> import simplejson as json
+    >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
+    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+    True
+    >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
+    True
+    >>> from StringIO import StringIO
+    >>> io = StringIO('["streaming API"]')
+    >>> json.load(io)[0] == 'streaming API'
+    True
+
+Specializing JSON object decoding::
+
+    >>> import simplejson as json
+    >>> def as_complex(dct):
+    ...     if '__complex__' in dct:
+    ...         return complex(dct['real'], dct['imag'])
+    ...     return dct
+    ...
+    >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+    ...     object_hook=as_complex)
+    (1+2j)
+    >>> import decimal
+    >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
+    True
+
+Specializing JSON object encoding::
+
+    >>> import simplejson as json
+    >>> def encode_complex(obj):
+    ...     if isinstance(obj, complex):
+    ...         return [obj.real, obj.imag]
+    ...     raise TypeError(repr(obj) + " is not JSON serializable")
+    ...
+    >>> json.dumps(2 + 1j, default=encode_complex)
+    '[2.0, 1.0]'
+    >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
+    '[2.0, 1.0]'
+    >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
+    '[2.0, 1.0]'
+
+
+Using simplejson.tool from the shell to validate and pretty-print::
+
+    $ echo '{"json":"obj"}' | python -m simplejson.tool
+    {
+        "json": "obj"
+    }
+    $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+    Expecting property name: line 1 column 2 (char 2)
+"""
+__version__ = '2.0.9'
+__all__ = [
+    'dump', 'dumps', 'load', 'loads',
+    'JSONDecoder', 'JSONEncoder',
+]
+
+__author__ = 'Bob Ippolito <bob@redivi.com>'
+
+from decoder import JSONDecoder
+from encoder import JSONEncoder
+
+_default_encoder = JSONEncoder(
+    skipkeys=False,
+    ensure_ascii=True,
+    check_circular=True,
+    allow_nan=True,
+    indent=None,
+    separators=None,
+    encoding='utf-8',
+    default=None,
+)
+
+def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        encoding='utf-8', default=None, **kw):
+    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
+    ``.write()``-supporting file-like object).
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+    will be skipped instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
+    may be ``unicode`` instances, subject to normal Python ``str`` to
+    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
+    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
+    to cause an error.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in an ``OverflowError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
+    in strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and object
+    members will be pretty-printed with that indent level. An indent level
+    of 0 will only insert newlines. ``None`` is the most compact representation.
+
+    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+    then it will be used instead of the default ``(', ', ': ')`` separators.
+    ``(',', ':')`` is the most compact JSON representation.
+
+    ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg.
+
+    """
+    # cached encoder
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        encoding == 'utf-8' and default is None and not kw):
+        iterable = _default_encoder.iterencode(obj)
+    else:
+        if cls is None:
+            cls = JSONEncoder
+        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+            separators=separators, encoding=encoding,
+            default=default, **kw).iterencode(obj)
+    # could accelerate with writelines in some versions of Python, at
+    # a debuggability cost
+    for chunk in iterable:
+        fp.write(chunk)
+
+
+def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        encoding='utf-8', default=None, **kw):
+    """Serialize ``obj`` to a JSON formatted ``str``.
+
+    If ``skipkeys`` is false then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+    will be skipped instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the return value will be a
+    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
+    coercion rules instead of being escaped to an ASCII ``str``.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in an ``OverflowError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+    strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and
+    object members will be pretty-printed with that indent level. An indent
+    level of 0 will only insert newlines. ``None`` is the most compact
+    representation.
+
+    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+    then it will be used instead of the default ``(', ', ': ')`` separators.
+    ``(',', ':')`` is the most compact JSON representation.
+
+    ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg.
+
+    """
+    # cached encoder
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        encoding == 'utf-8' and default is None and not kw):
+        return _default_encoder.encode(obj)
+    if cls is None:
+        cls = JSONEncoder
+    return cls(
+        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+        separators=separators, encoding=encoding, default=default,
+        **kw).encode(obj)
+
+
+_default_decoder = JSONDecoder(encoding=None, object_hook=None)
+
+
+def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, **kw):
+    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
+    a JSON document) to a Python object.
+
+    If the contents of ``fp`` is encoded with an ASCII based encoding other
+    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
+    be specified. Encodings that are not ASCII based (such as UCS-2) are
+    not allowed, and should be wrapped with
+    ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
+    object and passed to ``loads()``
+
+    ``object_hook`` is an optional function that will be called with the
+    result of any object literal decode (a ``dict``). The return value of
+    ``object_hook`` will be used instead of the ``dict``. This feature
+    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+    kwarg.
+
+    """
+    return loads(fp.read(),
+        encoding=encoding, cls=cls, object_hook=object_hook,
+        parse_float=parse_float, parse_int=parse_int,
+        parse_constant=parse_constant, **kw)
+
+
+def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, **kw):
+    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
+    document) to a Python object.
+
+    If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
+    other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
+    must be specified. Encodings that are not ASCII based (such as UCS-2)
+    are not allowed and should be decoded to ``unicode`` first.
+
+    ``object_hook`` is an optional function that will be called with the
+    result of any object literal decode (a ``dict``). The return value of
+    ``object_hook`` will be used instead of the ``dict``. This feature
+    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+    ``parse_float``, if specified, will be called with the string
+    of every JSON float to be decoded. By default this is equivalent to
+    float(num_str). This can be used to use another datatype or parser
+    for JSON floats (e.g. decimal.Decimal).
+
+    ``parse_int``, if specified, will be called with the string
+    of every JSON int to be decoded. By default this is equivalent to
+    int(num_str). This can be used to use another datatype or parser
+    for JSON integers (e.g. float).
+
+    ``parse_constant``, if specified, will be called with one of the
+    following strings: -Infinity, Infinity, NaN, null, true, false.
+    This can be used to raise an exception if invalid JSON numbers
+    are encountered.
+
+    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+    kwarg.
+
+    """
+    if (cls is None and encoding is None and object_hook is None and
+            parse_int is None and parse_float is None and
+            parse_constant is None and not kw):
+        return _default_decoder.decode(s)
+    if cls is None:
+        cls = JSONDecoder
+    if object_hook is not None:
+        kw['object_hook'] = object_hook
+    if parse_float is not None:
+        kw['parse_float'] = parse_float
+    if parse_int is not None:
+        kw['parse_int'] = parse_int
+    if parse_constant is not None:
+        kw['parse_constant'] = parse_constant
+    return cls(encoding=encoding, **kw).decode(s)
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/decoder.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..b769ea486ca932cd83b1689ef8e055ae2658aa72
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/decoder.py
@@ -0,0 +1,354 @@
+"""Implementation of JSONDecoder
+"""
+import re
+import sys
+import struct
+
+from simplejson.scanner import make_scanner
+try:
+    from simplejson._speedups import scanstring as c_scanstring
+except ImportError:
+    c_scanstring = None
+
+__all__ = ['JSONDecoder']
+
+FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
+
+def _floatconstants():
+    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
+    if sys.byteorder != 'big':
+        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
+    nan, inf = struct.unpack('dd', _BYTES)
+    return nan, inf, -inf
+
+NaN, PosInf, NegInf = _floatconstants()
+
+
+def linecol(doc, pos):
+    lineno = doc.count('\n', 0, pos) + 1
+    if lineno == 1:
+        colno = pos
+    else:
+        colno = pos - doc.rindex('\n', 0, pos)
+    return lineno, colno
+
+
+def errmsg(msg, doc, pos, end=None):
+    # Note that this function is called from _speedups
+    lineno, colno = linecol(doc, pos)
+    if end is None:
+        #fmt = '{0}: line {1} column {2} (char {3})'
+        #return fmt.format(msg, lineno, colno, pos)
+        fmt = '%s: line %d column %d (char %d)'
+        return fmt % (msg, lineno, colno, pos)
+    endlineno, endcolno = linecol(doc, end)
+    #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
+    #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
+    fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
+    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
+
+
+_CONSTANTS = {
+    '-Infinity': NegInf,
+    'Infinity': PosInf,
+    'NaN': NaN,
+}
+
+STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
+BACKSLASH = {
+    '"': u'"', '\\': u'\\', '/': u'/',
+    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
+}
+
+DEFAULT_ENCODING = "utf-8"
+
+def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
+    """Scan the string s for a JSON string. End is the index of the
+    character in s after the quote that started the JSON string.
+    Unescapes all valid JSON string escape sequences and raises ValueError
+    on attempt to decode an invalid string. If strict is False then literal
+    control characters are allowed in the string.
+    
+    Returns a tuple of the decoded string and the index of the character in s
+    after the end quote."""
+    if encoding is None:
+        encoding = DEFAULT_ENCODING
+    chunks = []
+    _append = chunks.append
+    begin = end - 1
+    while 1:
+        chunk = _m(s, end)
+        if chunk is None:
+            raise ValueError(
+                errmsg("Unterminated string starting at", s, begin))
+        end = chunk.end()
+        content, terminator = chunk.groups()
+        # Content is contains zero or more unescaped string characters
+        if content:
+            if not isinstance(content, unicode):
+                content = unicode(content, encoding)
+            _append(content)
+        # Terminator is the end of string, a literal control character,
+        # or a backslash denoting that an escape sequence follows
+        if terminator == '"':
+            break
+        elif terminator != '\\':
+            if strict:
+                msg = "Invalid control character %r at" % (terminator,)
+                #msg = "Invalid control character {0!r} at".format(terminator)
+                raise ValueError(errmsg(msg, s, end))
+            else:
+                _append(terminator)
+                continue
+        try:
+            esc = s[end]
+        except IndexError:
+            raise ValueError(
+                errmsg("Unterminated string starting at", s, begin))
+        # If not a unicode escape sequence, must be in the lookup table
+        if esc != 'u':
+            try:
+                char = _b[esc]
+            except KeyError:
+                msg = "Invalid \\escape: " + repr(esc)
+                raise ValueError(errmsg(msg, s, end))
+            end += 1
+        else:
+            # Unicode escape sequence
+            esc = s[end + 1:end + 5]
+            next_end = end + 5
+            if len(esc) != 4:
+                msg = "Invalid \\uXXXX escape"
+                raise ValueError(errmsg(msg, s, end))
+            uni = int(esc, 16)
+            # Check for surrogate pair on UCS-4 systems
+            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
+                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
+                if not s[end + 5:end + 7] == '\\u':
+                    raise ValueError(errmsg(msg, s, end))
+                esc2 = s[end + 7:end + 11]
+                if len(esc2) != 4:
+                    raise ValueError(errmsg(msg, s, end))
+                uni2 = int(esc2, 16)
+                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
+                next_end += 6
+            char = unichr(uni)
+            end = next_end
+        # Append the unescaped character
+        _append(char)
+    return u''.join(chunks), end
+
+
+# Use speedup if available
+scanstring = c_scanstring or py_scanstring
+
+WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
+WHITESPACE_STR = ' \t\n\r'
+
+def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+    pairs = {}
+    # Use a slice to prevent IndexError from being raised, the following
+    # check will raise a more specific ValueError if the string is empty
+    nextchar = s[end:end + 1]
+    # Normally we expect nextchar == '"'
+    if nextchar != '"':
+        if nextchar in _ws:
+            end = _w(s, end).end()
+            nextchar = s[end:end + 1]
+        # Trivial empty object
+        if nextchar == '}':
+            return pairs, end + 1
+        elif nextchar != '"':
+            raise ValueError(errmsg("Expecting property name", s, end))
+    end += 1
+    while True:
+        key, end = scanstring(s, end, encoding, strict)
+
+        # To skip some function call overhead we optimize the fast paths where
+        # the JSON key separator is ": " or just ":".
+        if s[end:end + 1] != ':':
+            end = _w(s, end).end()
+            if s[end:end + 1] != ':':
+                raise ValueError(errmsg("Expecting : delimiter", s, end))
+
+        end += 1
+
+        try:
+            if s[end] in _ws:
+                end += 1
+                if s[end] in _ws:
+                    end = _w(s, end + 1).end()
+        except IndexError:
+            pass
+
+        try:
+            value, end = scan_once(s, end)
+        except StopIteration:
+            raise ValueError(errmsg("Expecting object", s, end))
+        pairs[key] = value
+
+        try:
+            nextchar = s[end]
+            if nextchar in _ws:
+                end = _w(s, end + 1).end()
+                nextchar = s[end]
+        except IndexError:
+            nextchar = ''
+        end += 1
+
+        if nextchar == '}':
+            break
+        elif nextchar != ',':
+            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
+
+        try:
+            nextchar = s[end]
+            if nextchar in _ws:
+                end += 1
+                nextchar = s[end]
+                if nextchar in _ws:
+                    end = _w(s, end + 1).end()
+                    nextchar = s[end]
+        except IndexError:
+            nextchar = ''
+
+        end += 1
+        if nextchar != '"':
+            raise ValueError(errmsg("Expecting property name", s, end - 1))
+
+    if object_hook is not None:
+        pairs = object_hook(pairs)
+    return pairs, end
+
+def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+    values = []
+    nextchar = s[end:end + 1]
+    if nextchar in _ws:
+        end = _w(s, end + 1).end()
+        nextchar = s[end:end + 1]
+    # Look-ahead for trivial empty array
+    if nextchar == ']':
+        return values, end + 1
+    _append = values.append
+    while True:
+        try:
+            value, end = scan_once(s, end)
+        except StopIteration:
+            raise ValueError(errmsg("Expecting object", s, end))
+        _append(value)
+        nextchar = s[end:end + 1]
+        if nextchar in _ws:
+            end = _w(s, end + 1).end()
+            nextchar = s[end:end + 1]
+        end += 1
+        if nextchar == ']':
+            break
+        elif nextchar != ',':
+            raise ValueError(errmsg("Expecting , delimiter", s, end))
+
+        try:
+            if s[end] in _ws:
+                end += 1
+                if s[end] in _ws:
+                    end = _w(s, end + 1).end()
+        except IndexError:
+            pass
+
+    return values, end
+
+class JSONDecoder(object):
+    """Simple JSON <http://json.org> decoder
+
+    Performs the following translations in decoding by default:
+
+    +---------------+-------------------+
+    | JSON          | Python            |
+    +===============+===================+
+    | object        | dict              |
+    +---------------+-------------------+
+    | array         | list              |
+    +---------------+-------------------+
+    | string        | unicode           |
+    +---------------+-------------------+
+    | number (int)  | int, long         |
+    +---------------+-------------------+
+    | number (real) | float             |
+    +---------------+-------------------+
+    | true          | True              |
+    +---------------+-------------------+
+    | false         | False             |
+    +---------------+-------------------+
+    | null          | None              |
+    +---------------+-------------------+
+
+    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
+    their corresponding ``float`` values, which is outside the JSON spec.
+
+    """
+
+    def __init__(self, encoding=None, object_hook=None, parse_float=None,
+            parse_int=None, parse_constant=None, strict=True):
+        """``encoding`` determines the encoding used to interpret any ``str``
+        objects decoded by this instance (utf-8 by default).  It has no
+        effect when decoding ``unicode`` objects.
+
+        Note that currently only encodings that are a superset of ASCII work,
+        strings of other encodings should be passed in as ``unicode``.
+
+        ``object_hook``, if specified, will be called with the result
+        of every JSON object decoded and its return value will be used in
+        place of the given ``dict``.  This can be used to provide custom
+        deserializations (e.g. to support JSON-RPC class hinting).
+
+        ``parse_float``, if specified, will be called with the string
+        of every JSON float to be decoded. By default this is equivalent to
+        float(num_str). This can be used to use another datatype or parser
+        for JSON floats (e.g. decimal.Decimal).
+
+        ``parse_int``, if specified, will be called with the string
+        of every JSON int to be decoded. By default this is equivalent to
+        int(num_str). This can be used to use another datatype or parser
+        for JSON integers (e.g. float).
+
+        ``parse_constant``, if specified, will be called with one of the
+        following strings: -Infinity, Infinity, NaN.
+        This can be used to raise an exception if invalid JSON numbers
+        are encountered.
+
+        """
+        self.encoding = encoding
+        self.object_hook = object_hook
+        self.parse_float = parse_float or float
+        self.parse_int = parse_int or int
+        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
+        self.strict = strict
+        self.parse_object = JSONObject
+        self.parse_array = JSONArray
+        self.parse_string = scanstring
+        self.scan_once = make_scanner(self)
+
+    def decode(self, s, _w=WHITESPACE.match):
+        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
+        instance containing a JSON document)
+
+        """
+        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
+        end = _w(s, end).end()
+        if end != len(s):
+            raise ValueError(errmsg("Extra data", s, end, len(s)))
+        return obj
+
+    def raw_decode(self, s, idx=0):
+        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
+        with a JSON document) and return a 2-tuple of the Python
+        representation and the index in ``s`` where the document ended.
+
+        This can be used to decode a JSON document from a string that may
+        have extraneous data at the end.
+
+        """
+        try:
+            obj, end = self.scan_once(s, idx)
+        except StopIteration:
+            raise ValueError("No JSON object could be decoded")
+        return obj, end
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/encoder.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf58290366b4e33351a73123c391eabc8ebc45fa
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/encoder.py
@@ -0,0 +1,440 @@
+"""Implementation of JSONEncoder
+"""
+import re
+
+try:
+    from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
+except ImportError:
+    c_encode_basestring_ascii = None
+try:
+    from simplejson._speedups import make_encoder as c_make_encoder
+except ImportError:
+    c_make_encoder = None
+
+ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
+ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
+HAS_UTF8 = re.compile(r'[\x80-\xff]')
+ESCAPE_DCT = {
+    '\\': '\\\\',
+    '"': '\\"',
+    '\b': '\\b',
+    '\f': '\\f',
+    '\n': '\\n',
+    '\r': '\\r',
+    '\t': '\\t',
+}
+for i in range(0x20):
+    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
+    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
+
+# Assume this produces an infinity on all machines (probably not guaranteed)
+INFINITY = float('1e66666')
+FLOAT_REPR = repr
+
+def encode_basestring(s):
+    """Return a JSON representation of a Python string
+
+    """
+    def replace(match):
+        return ESCAPE_DCT[match.group(0)]
+    return '"' + ESCAPE.sub(replace, s) + '"'
+
+
+def py_encode_basestring_ascii(s):
+    """Return an ASCII-only JSON representation of a Python string
+
+    """
+    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
+        s = s.decode('utf-8')
+    def replace(match):
+        s = match.group(0)
+        try:
+            return ESCAPE_DCT[s]
+        except KeyError:
+            n = ord(s)
+            if n < 0x10000:
+                #return '\\u{0:04x}'.format(n)
+                return '\\u%04x' % (n,)
+            else:
+                # surrogate pair
+                n -= 0x10000
+                s1 = 0xd800 | ((n >> 10) & 0x3ff)
+                s2 = 0xdc00 | (n & 0x3ff)
+                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
+                return '\\u%04x\\u%04x' % (s1, s2)
+    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
+
+
+encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
+
+class JSONEncoder(object):
+    """Extensible JSON <http://json.org> encoder for Python data structures.
+
+    Supports the following objects and types by default:
+
+    +-------------------+---------------+
+    | Python            | JSON          |
+    +===================+===============+
+    | dict              | object        |
+    +-------------------+---------------+
+    | list, tuple       | array         |
+    +-------------------+---------------+
+    | str, unicode      | string        |
+    +-------------------+---------------+
+    | int, long, float  | number        |
+    +-------------------+---------------+
+    | True              | true          |
+    +-------------------+---------------+
+    | False             | false         |
+    +-------------------+---------------+
+    | None              | null          |
+    +-------------------+---------------+
+
+    To extend this to recognize other objects, subclass and implement a
+    ``.default()`` method with another method that returns a serializable
+    object for ``o`` if possible, otherwise it should call the superclass
+    implementation (to raise ``TypeError``).
+
+    """
+    item_separator = ', '
+    key_separator = ': '
+    def __init__(self, skipkeys=False, ensure_ascii=True,
+            check_circular=True, allow_nan=True, sort_keys=False,
+            indent=None, separators=None, encoding='utf-8', default=None):
+        """Constructor for JSONEncoder, with sensible defaults.
+
+        If skipkeys is false, then it is a TypeError to attempt
+        encoding of keys that are not str, int, long, float or None.  If
+        skipkeys is True, such items are simply skipped.
+
+        If ensure_ascii is true, the output is guaranteed to be str
+        objects with all incoming unicode characters escaped.  If
+        ensure_ascii is false, the output will be unicode object.
+
+        If check_circular is true, then lists, dicts, and custom encoded
+        objects will be checked for circular references during encoding to
+        prevent an infinite recursion (which would cause an OverflowError).
+        Otherwise, no such check takes place.
+
+        If allow_nan is true, then NaN, Infinity, and -Infinity will be
+        encoded as such.  This behavior is not JSON specification compliant,
+        but is consistent with most JavaScript based encoders and decoders.
+        Otherwise, it will be a ValueError to encode such floats.
+
+        If sort_keys is true, then the output of dictionaries will be
+        sorted by key; this is useful for regression tests to ensure
+        that JSON serializations can be compared on a day-to-day basis.
+
+        If indent is a non-negative integer, then JSON array
+        elements and object members will be pretty-printed with that
+        indent level.  An indent level of 0 will only insert newlines.
+        None is the most compact representation.
+
+        If specified, separators should be a (item_separator, key_separator)
+        tuple.  The default is (', ', ': ').  To get the most compact JSON
+        representation you should specify (',', ':') to eliminate whitespace.
+
+        If specified, default is a function that gets called for objects
+        that can't otherwise be serialized.  It should return a JSON encodable
+        version of the object or raise a ``TypeError``.
+
+        If encoding is not None, then all input strings will be
+        transformed into unicode using that encoding prior to JSON-encoding.
+        The default is UTF-8.
+
+        """
+
+        self.skipkeys = skipkeys
+        self.ensure_ascii = ensure_ascii
+        self.check_circular = check_circular
+        self.allow_nan = allow_nan
+        self.sort_keys = sort_keys
+        self.indent = indent
+        if separators is not None:
+            self.item_separator, self.key_separator = separators
+        if default is not None:
+            self.default = default
+        self.encoding = encoding
+
+    def default(self, o):
+        """Implement this method in a subclass such that it returns
+        a serializable object for ``o``, or calls the base implementation
+        (to raise a ``TypeError``).
+
+        For example, to support arbitrary iterators, you could
+        implement default like this::
+
+            def default(self, o):
+                try:
+                    iterable = iter(o)
+                except TypeError:
+                    pass
+                else:
+                    return list(iterable)
+                return JSONEncoder.default(self, o)
+
+        """
+        raise TypeError(repr(o) + " is not JSON serializable")
+
+    def encode(self, o):
+        """Return a JSON string representation of a Python data structure.
+
+        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
+        '{"foo": ["bar", "baz"]}'
+
+        """
+        # This is for extremely simple cases and benchmarks.
+        if isinstance(o, basestring):
+            if isinstance(o, str):
+                _encoding = self.encoding
+                if (_encoding is not None
+                        and not (_encoding == 'utf-8')):
+                    o = o.decode(_encoding)
+            if self.ensure_ascii:
+                return encode_basestring_ascii(o)
+            else:
+                return encode_basestring(o)
+        # This doesn't pass the iterator directly to ''.join() because the
+        # exceptions aren't as detailed.  The list call should be roughly
+        # equivalent to the PySequence_Fast that ''.join() would do.
+        chunks = self.iterencode(o, _one_shot=True)
+        if not isinstance(chunks, (list, tuple)):
+            chunks = list(chunks)
+        return ''.join(chunks)
+
+    def iterencode(self, o, _one_shot=False):
+        """Encode the given object and yield each string
+        representation as available.
+
+        For example::
+
+            for chunk in JSONEncoder().iterencode(bigobject):
+                mysocket.write(chunk)
+
+        """
+        if self.check_circular:
+            markers = {}
+        else:
+            markers = None
+        if self.ensure_ascii:
+            _encoder = encode_basestring_ascii
+        else:
+            _encoder = encode_basestring
+        if self.encoding != 'utf-8':
+            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
+                if isinstance(o, str):
+                    o = o.decode(_encoding)
+                return _orig_encoder(o)
+
+        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
+            # Check for specials.  Note that this type of test is processor- and/or
+            # platform-specific, so do tests which don't depend on the internals.
+
+            if o != o:
+                text = 'NaN'
+            elif o == _inf:
+                text = 'Infinity'
+            elif o == _neginf:
+                text = '-Infinity'
+            else:
+                return _repr(o)
+
+            if not allow_nan:
+                raise ValueError(
+                    "Out of range float values are not JSON compliant: " +
+                    repr(o))
+
+            return text
+
+
+        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
+            _iterencode = c_make_encoder(
+                markers, self.default, _encoder, self.indent,
+                self.key_separator, self.item_separator, self.sort_keys,
+                self.skipkeys, self.allow_nan)
+        else:
+            _iterencode = _make_iterencode(
+                markers, self.default, _encoder, self.indent, floatstr,
+                self.key_separator, self.item_separator, self.sort_keys,
+                self.skipkeys, _one_shot)
+        return _iterencode(o, 0)
+
+def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
+        ## HACK: hand-optimized bytecode; turn globals into locals
+        False=False,
+        True=True,
+        ValueError=ValueError,
+        basestring=basestring,
+        dict=dict,
+        float=float,
+        id=id,
+        int=int,
+        isinstance=isinstance,
+        list=list,
+        long=long,
+        str=str,
+        tuple=tuple,
+    ):
+
+    def _iterencode_list(lst, _current_indent_level):
+        if not lst:
+            yield '[]'
+            return
+        if markers is not None:
+            markerid = id(lst)
+            if markerid in markers:
+                raise ValueError("Circular reference detected")
+            markers[markerid] = lst
+        buf = '['
+        if _indent is not None:
+            _current_indent_level += 1
+            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+            separator = _item_separator + newline_indent
+            buf += newline_indent
+        else:
+            newline_indent = None
+            separator = _item_separator
+        first = True
+        for value in lst:
+            if first:
+                first = False
+            else:
+                buf = separator
+            if isinstance(value, basestring):
+                yield buf + _encoder(value)
+            elif value is None:
+                yield buf + 'null'
+            elif value is True:
+                yield buf + 'true'
+            elif value is False:
+                yield buf + 'false'
+            elif isinstance(value, (int, long)):
+                yield buf + str(value)
+            elif isinstance(value, float):
+                yield buf + _floatstr(value)
+            else:
+                yield buf
+                if isinstance(value, (list, tuple)):
+                    chunks = _iterencode_list(value, _current_indent_level)
+                elif isinstance(value, dict):
+                    chunks = _iterencode_dict(value, _current_indent_level)
+                else:
+                    chunks = _iterencode(value, _current_indent_level)
+                for chunk in chunks:
+                    yield chunk
+        if newline_indent is not None:
+            _current_indent_level -= 1
+            yield '\n' + (' ' * (_indent * _current_indent_level))
+        yield ']'
+        if markers is not None:
+            del markers[markerid]
+
+    def _iterencode_dict(dct, _current_indent_level):
+        if not dct:
+            yield '{}'
+            return
+        if markers is not None:
+            markerid = id(dct)
+            if markerid in markers:
+                raise ValueError("Circular reference detected")
+            markers[markerid] = dct
+        yield '{'
+        if _indent is not None:
+            _current_indent_level += 1
+            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+            item_separator = _item_separator + newline_indent
+            yield newline_indent
+        else:
+            newline_indent = None
+            item_separator = _item_separator
+        first = True
+        if _sort_keys:
+            items = dct.items()
+            items.sort(key=lambda kv: kv[0])
+        else:
+            items = dct.iteritems()
+        for key, value in items:
+            if isinstance(key, basestring):
+                pass
+            # JavaScript is weakly typed for these, so it makes sense to
+            # also allow them.  Many encoders seem to do something like this.
+            elif isinstance(key, float):
+                key = _floatstr(key)
+            elif key is True:
+                key = 'true'
+            elif key is False:
+                key = 'false'
+            elif key is None:
+                key = 'null'
+            elif isinstance(key, (int, long)):
+                key = str(key)
+            elif _skipkeys:
+                continue
+            else:
+                raise TypeError("key " + repr(key) + " is not a string")
+            if first:
+                first = False
+            else:
+                yield item_separator
+            yield _encoder(key)
+            yield _key_separator
+            if isinstance(value, basestring):
+                yield _encoder(value)
+            elif value is None:
+                yield 'null'
+            elif value is True:
+                yield 'true'
+            elif value is False:
+                yield 'false'
+            elif isinstance(value, (int, long)):
+                yield str(value)
+            elif isinstance(value, float):
+                yield _floatstr(value)
+            else:
+                if isinstance(value, (list, tuple)):
+                    chunks = _iterencode_list(value, _current_indent_level)
+                elif isinstance(value, dict):
+                    chunks = _iterencode_dict(value, _current_indent_level)
+                else:
+                    chunks = _iterencode(value, _current_indent_level)
+                for chunk in chunks:
+                    yield chunk
+        if newline_indent is not None:
+            _current_indent_level -= 1
+            yield '\n' + (' ' * (_indent * _current_indent_level))
+        yield '}'
+        if markers is not None:
+            del markers[markerid]
+
+    def _iterencode(o, _current_indent_level):
+        if isinstance(o, basestring):
+            yield _encoder(o)
+        elif o is None:
+            yield 'null'
+        elif o is True:
+            yield 'true'
+        elif o is False:
+            yield 'false'
+        elif isinstance(o, (int, long)):
+            yield str(o)
+        elif isinstance(o, float):
+            yield _floatstr(o)
+        elif isinstance(o, (list, tuple)):
+            for chunk in _iterencode_list(o, _current_indent_level):
+                yield chunk
+        elif isinstance(o, dict):
+            for chunk in _iterencode_dict(o, _current_indent_level):
+                yield chunk
+        else:
+            if markers is not None:
+                markerid = id(o)
+                if markerid in markers:
+                    raise ValueError("Circular reference detected")
+                markers[markerid] = o
+            o = _default(o)
+            for chunk in _iterencode(o, _current_indent_level):
+                yield chunk
+            if markers is not None:
+                del markers[markerid]
+
+    return _iterencode
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/scanner.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/scanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..adbc6ec979c9f05d54e1556a6fd007499a953ee6
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/compat/simplejson/scanner.py
@@ -0,0 +1,65 @@
+"""JSON token scanner
+"""
+import re
+try:
+    from simplejson._speedups import make_scanner as c_make_scanner
+except ImportError:
+    c_make_scanner = None
+
+__all__ = ['make_scanner']
+
+NUMBER_RE = re.compile(
+    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
+    (re.VERBOSE | re.MULTILINE | re.DOTALL))
+
+def py_make_scanner(context):
+    parse_object = context.parse_object
+    parse_array = context.parse_array
+    parse_string = context.parse_string
+    match_number = NUMBER_RE.match
+    encoding = context.encoding
+    strict = context.strict
+    parse_float = context.parse_float
+    parse_int = context.parse_int
+    parse_constant = context.parse_constant
+    object_hook = context.object_hook
+
+    def _scan_once(string, idx):
+        try:
+            nextchar = string[idx]
+        except IndexError:
+            raise StopIteration
+
+        if nextchar == '"':
+            return parse_string(string, idx + 1, encoding, strict)
+        elif nextchar == '{':
+            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
+        elif nextchar == '[':
+            return parse_array((string, idx + 1), _scan_once)
+        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
+            return None, idx + 4
+        elif nextchar == 't' and string[idx:idx + 4] == 'true':
+            return True, idx + 4
+        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
+            return False, idx + 5
+
+        m = match_number(string, idx)
+        if m is not None:
+            integer, frac, exp = m.groups()
+            if frac or exp:
+                res = parse_float(integer + (frac or '') + (exp or ''))
+            else:
+                res = parse_int(integer)
+            return res, m.end()
+        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
+            return parse_constant('NaN'), idx + 3
+        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
+            return parse_constant('Infinity'), idx + 8
+        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
+            return parse_constant('-Infinity'), idx + 9
+        else:
+            raise StopIteration
+
+    return _scan_once
+
+make_scanner = c_make_scanner or py_make_scanner
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/connection.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e08eb15b2970fe9eadaeb6a920c9872386b9836
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/connection.py
@@ -0,0 +1,1056 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import errno
+import logging
+import os
+import pprint
+import stat
+import sys
+import time
+
+import ansible.constants as C
+import ansible.errors
+import ansible.plugins.connection
+import ansible.utils.shlex
+
+import mitogen.core
+import mitogen.fork
+import mitogen.utils
+
+import ansible_mitogen.mixins
+import ansible_mitogen.parsing
+import ansible_mitogen.process
+import ansible_mitogen.services
+import ansible_mitogen.target
+import ansible_mitogen.transport_config
+
+
+LOG = logging.getLogger(__name__)
+
+task_vars_msg = (
+    'could not recover task_vars. This means some connection '
+    'settings may erroneously be reset to their defaults. '
+    'Please report a bug if you encounter this message.'
+)
+
+
+def get_remote_name(spec):
+    """
+    Return the value to use for the "remote_name" parameter.
+    """
+    if spec.mitogen_mask_remote_name():
+        return 'ansible'
+    return None
+
+
+def optional_int(value):
+    """
+    Convert `value` to an integer if it is not :data:`None`, otherwise return
+    :data:`None`.
+    """
+    try:
+        return int(value)
+    except (TypeError, ValueError):
+        return None
+
+
+def convert_bool(obj):
+    if isinstance(obj, bool):
+        return obj
+    if str(obj).lower() in ('no', 'false', '0'):
+        return False
+    if str(obj).lower() not in ('yes', 'true', '1'):
+        raise ansible.errors.AnsibleConnectionFailure(
+            'expected yes/no/true/false/0/1, got %r' % (obj,)
+        )
+    return True
+
+
+def default(value, default):
+    """
+    Return `default` is `value` is :data:`None`, otherwise return `value`.
+    """
+    if value is None:
+        return default
+    return value
+
+
+def _connect_local(spec):
+    """
+    Return ContextService arguments for a local connection.
+    """
+    return {
+        'method': 'local',
+        'kwargs': {
+            'python_path': spec.python_path(),
+        }
+    }
+
+
+def _connect_ssh(spec):
+    """
+    Return ContextService arguments for an SSH connection.
+    """
+    if C.HOST_KEY_CHECKING:
+        check_host_keys = 'enforce'
+    else:
+        check_host_keys = 'ignore'
+
+    # #334: tilde-expand private_key_file to avoid implementation difference
+    # between Python and OpenSSH.
+    private_key_file = spec.private_key_file()
+    if private_key_file is not None:
+        private_key_file = os.path.expanduser(private_key_file)
+
+    return {
+        'method': 'ssh',
+        'kwargs': {
+            'check_host_keys': check_host_keys,
+            'hostname': spec.remote_addr(),
+            'username': spec.remote_user(),
+            'compression': convert_bool(
+                default(spec.mitogen_ssh_compression(), True)
+            ),
+            'password': spec.password(),
+            'port': spec.port(),
+            'python_path': spec.python_path(),
+            'identity_file': private_key_file,
+            'identities_only': False,
+            'ssh_path': spec.ssh_executable(),
+            'connect_timeout': spec.ansible_ssh_timeout(),
+            'ssh_args': spec.ssh_args(),
+            'ssh_debug_level': spec.mitogen_ssh_debug_level(),
+            'remote_name': get_remote_name(spec),
+            'keepalive_count': (
+                spec.mitogen_ssh_keepalive_count() or 10
+            ),
+            'keepalive_interval': (
+                spec.mitogen_ssh_keepalive_interval() or 30
+            ),
+        }
+    }
+
+def _connect_buildah(spec):
+    """
+    Return ContextService arguments for a Buildah connection.
+    """
+    return {
+        'method': 'buildah',
+        'kwargs': {
+            'username': spec.remote_user(),
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+def _connect_docker(spec):
+    """
+    Return ContextService arguments for a Docker connection.
+    """
+    return {
+        'method': 'docker',
+        'kwargs': {
+            'username': spec.remote_user(),
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_kubectl(spec):
+    """
+    Return ContextService arguments for a Kubernetes connection.
+    """
+    return {
+        'method': 'kubectl',
+        'kwargs': {
+            'pod': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+            'kubectl_path': spec.mitogen_kubectl_path(),
+            'kubectl_args': spec.extra_args(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_jail(spec):
+    """
+    Return ContextService arguments for a FreeBSD jail connection.
+    """
+    return {
+        'method': 'jail',
+        'kwargs': {
+            'username': spec.remote_user(),
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_lxc(spec):
+    """
+    Return ContextService arguments for an LXC Classic container connection.
+    """
+    return {
+        'method': 'lxc',
+        'kwargs': {
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'lxc_attach_path': spec.mitogen_lxc_attach_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_lxd(spec):
+    """
+    Return ContextService arguments for an LXD container connection.
+    """
+    return {
+        'method': 'lxd',
+        'kwargs': {
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'lxc_path': spec.mitogen_lxc_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_machinectl(spec):
+    """
+    Return ContextService arguments for a machinectl connection.
+    """
+    return _connect_setns(spec, kind='machinectl')
+
+
+def _connect_setns(spec, kind=None):
+    """
+    Return ContextService arguments for a mitogen_setns connection.
+    """
+    return {
+        'method': 'setns',
+        'kwargs': {
+            'container': spec.remote_addr(),
+            'username': spec.remote_user(),
+            'python_path': spec.python_path(),
+            'kind': kind or spec.mitogen_kind(),
+            'docker_path': spec.mitogen_docker_path(),
+            'lxc_path': spec.mitogen_lxc_path(),
+            'lxc_info_path': spec.mitogen_lxc_info_path(),
+            'machinectl_path': spec.mitogen_machinectl_path(),
+        }
+    }
+
+
+def _connect_su(spec):
+    """
+    Return ContextService arguments for su as a become method.
+    """
+    return {
+        'method': 'su',
+        'enable_lru': True,
+        'kwargs': {
+            'username': spec.become_user(),
+            'password': spec.become_pass(),
+            'python_path': spec.python_path(),
+            'su_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_sudo(spec):
+    """
+    Return ContextService arguments for sudo as a become method.
+    """
+    return {
+        'method': 'sudo',
+        'enable_lru': True,
+        'kwargs': {
+            'username': spec.become_user(),
+            'password': spec.become_pass(),
+            'python_path': spec.python_path(),
+            'sudo_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+            'sudo_args': spec.sudo_args(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_doas(spec):
+    """
+    Return ContextService arguments for doas as a become method.
+    """
+    return {
+        'method': 'doas',
+        'enable_lru': True,
+        'kwargs': {
+            'username': spec.become_user(),
+            'password': spec.become_pass(),
+            'python_path': spec.python_path(),
+            'doas_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_mitogen_su(spec):
+    """
+    Return ContextService arguments for su as a first class connection.
+    """
+    return {
+        'method': 'su',
+        'kwargs': {
+            'username': spec.remote_user(),
+            'password': spec.password(),
+            'python_path': spec.python_path(),
+            'su_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_mitogen_sudo(spec):
+    """
+    Return ContextService arguments for sudo as a first class connection.
+    """
+    return {
+        'method': 'sudo',
+        'kwargs': {
+            'username': spec.remote_user(),
+            'password': spec.password(),
+            'python_path': spec.python_path(),
+            'sudo_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+            'sudo_args': spec.sudo_args(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+def _connect_mitogen_doas(spec):
+    """
+    Return ContextService arguments for doas as a first class connection.
+    """
+    return {
+        'method': 'doas',
+        'kwargs': {
+            'username': spec.remote_user(),
+            'password': spec.password(),
+            'python_path': spec.python_path(),
+            'doas_path': spec.ansible_doas_exe(),
+            'connect_timeout': spec.timeout(),
+            'remote_name': get_remote_name(spec),
+        }
+    }
+
+
+#: Mapping of connection method names to functions invoked as `func(spec)`
+#: generating ContextService keyword arguments matching a connection
+#: specification.
+CONNECTION_METHOD = {
+    'buildah': _connect_buildah,
+    'docker': _connect_docker,
+    'kubectl': _connect_kubectl,
+    'jail': _connect_jail,
+    'local': _connect_local,
+    'lxc': _connect_lxc,
+    'lxd': _connect_lxd,
+    'machinectl': _connect_machinectl,
+    'setns': _connect_setns,
+    'ssh': _connect_ssh,
+    'smart': _connect_ssh,  # issue #548.
+    'su': _connect_su,
+    'sudo': _connect_sudo,
+    'doas': _connect_doas,
+    'mitogen_su': _connect_mitogen_su,
+    'mitogen_sudo': _connect_mitogen_sudo,
+    'mitogen_doas': _connect_mitogen_doas,
+}
+
+
+class CallChain(mitogen.parent.CallChain):
+    """
+    Extend :class:`mitogen.parent.CallChain` to additionally cause the
+    associated :class:`Connection` to be reset if a ChannelError occurs.
+
+    This only catches failures that occur while a call is pending, it is a
+    stop-gap until a more general method is available to notice connection in
+    every situation.
+    """
+    call_aborted_msg = (
+        'Mitogen was disconnected from the remote environment while a call '
+        'was in-progress. If you feel this is in error, please file a bug. '
+        'Original error was: %s'
+    )
+
+    def __init__(self, connection, context, pipelined=False):
+        super(CallChain, self).__init__(context, pipelined)
+        #: The connection to reset on CallError.
+        self._connection = connection
+
+    def _rethrow(self, recv):
+        try:
+            return recv.get().unpickle()
+        except mitogen.core.ChannelError as e:
+            self._connection.reset()
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.call_aborted_msg % (e,)
+            )
+
+    def call(self, func, *args, **kwargs):
+        """
+        Like :meth:`mitogen.parent.CallChain.call`, but log timings.
+        """
+        t0 = time.time()
+        try:
+            recv = self.call_async(func, *args, **kwargs)
+            return self._rethrow(recv)
+        finally:
+            LOG.debug('Call took %d ms: %r', 1000 * (time.time() - t0),
+                      mitogen.parent.CallSpec(func, args, kwargs))
+
+
+class Connection(ansible.plugins.connection.ConnectionBase):
+    #: The :class:`ansible_mitogen.process.Binding` representing the connection
+    #: multiplexer this connection's target is assigned to. :data:`None` when
+    #: disconnected.
+    binding = None
+
+    #: mitogen.parent.Context for the target account on the target, possibly
+    #: reached via become.
+    context = None
+
+    #: Context for the login account on the target. This is always the login
+    #: account, even when become=True.
+    login_context = None
+
+    #: Only sudo, su, and doas are supported for now.
+    become_methods = ['sudo', 'su', 'doas']
+
+    #: Dict containing init_child() return value as recorded at startup by
+    #: ContextService. Contains:
+    #:
+    #:  fork_context:   Context connected to the fork parent : process in the
+    #:                  target account.
+    #:  home_dir:       Target context's home directory.
+    #:  good_temp_dir:  A writeable directory where new temporary directories
+    #:                  can be created.
+    init_child_result = None
+
+    #: A :class:`mitogen.parent.CallChain` for calls made to the target
+    #: account, to ensure subsequent calls fail with the original exception if
+    #: pipelined directory creation or file transfer fails.
+    chain = None
+
+    #
+    # Note: any of the attributes below may be :data:`None` if the connection
+    # plugin was constructed directly by a non-cooperative action, such as in
+    # the case of the synchronize module.
+    #
+
+    #: Set to task_vars by on_action_run().
+    _task_vars = None
+
+    #: Set by on_action_run()
+    delegate_to_hostname = None
+
+    #: Set to '_loader.get_basedir()' by on_action_run(). Used by mitogen_local
+    #: to change the working directory to that of the current playbook,
+    #: matching vanilla Ansible behaviour.
+    loader_basedir = None
+
+    def __del__(self):
+        """
+        Ansible cannot be trusted to always call close() e.g. the synchronize
+        action constructs a local connection like this. So provide a destructor
+        in the hopes of catching these cases.
+        """
+        # https://github.com/dw/mitogen/issues/140
+        self.close()
+
+    def on_action_run(self, task_vars, delegate_to_hostname, loader_basedir):
+        """
+        Invoked by ActionModuleMixin to indicate a new task is about to start
+        executing. We use the opportunity to grab relevant bits from the
+        task-specific data.
+
+        :param dict task_vars:
+            Task variable dictionary.
+        :param str delegate_to_hostname:
+            :data:`None`, or the template-expanded inventory hostname this task
+            is being delegated to. A similar variable exists on PlayContext
+            when ``delegate_to:`` is active, however it is unexpanded.
+        :param str loader_basedir:
+            Loader base directory; see :attr:`loader_basedir`.
+        """
+        self._task_vars = task_vars
+        self.delegate_to_hostname = delegate_to_hostname
+        self.loader_basedir = loader_basedir
+        self._put_connection()
+
+    def _get_task_vars(self):
+        """
+        More information is needed than normally provided to an Ansible
+        connection.  For proxied connections, intermediary configuration must
+        be inferred, and for any connection the configured Python interpreter
+        must be known.
+
+        There is no clean way to access this information that would not deviate
+        from the running Ansible version. The least invasive method known is to
+        reuse the running task's task_vars dict.
+
+        This method walks the stack to find task_vars of the Action plugin's
+        run(), or if no Action is present, from Strategy's _execute_meta(), as
+        in the case of 'meta: reset_connection'. The stack is walked in
+        addition to subclassing Action.run()/on_action_run(), as it is possible
+        for new connections to be constructed in addition to the preconstructed
+        connection passed into any running action.
+        """
+        if self._task_vars is not None:
+            return self._task_vars
+
+        f = sys._getframe()
+        while f:
+            if f.f_code.co_name == 'run':
+                f_locals = f.f_locals
+                f_self = f_locals.get('self')
+                if isinstance(f_self, ansible_mitogen.mixins.ActionModuleMixin):
+                    task_vars = f_locals.get('task_vars')
+                    if task_vars:
+                        LOG.debug('recovered task_vars from Action')
+                        return task_vars
+            elif f.f_code.co_name == '_execute_meta':
+                f_all_vars = f.f_locals.get('all_vars')
+                if isinstance(f_all_vars, dict):
+                    LOG.debug('recovered task_vars from meta:')
+                    return f_all_vars
+
+            f = f.f_back
+
+        raise ansible.errors.AnsibleConnectionFailure(task_vars_msg)
+
+    def get_host_vars(self, inventory_hostname):
+        """
+        Fetch the HostVars for a host.
+
+        :returns:
+            Variables dictionary or :data:`None`.
+        :raises ansible.errors.AnsibleConnectionFailure:
+            Task vars unavailable.
+        """
+        task_vars = self._get_task_vars()
+        hostvars = task_vars.get('hostvars')
+        if hostvars:
+            return hostvars.get(inventory_hostname)
+
+        raise ansible.errors.AnsibleConnectionFailure(task_vars_msg)
+
+    def get_task_var(self, key, default=None):
+        """
+        Fetch the value of a task variable related to connection configuration,
+        or, if delegate_to is active, fetch the same variable via HostVars for
+        the delegated-to machine.
+
+        When running with delegate_to, Ansible tasks have variables associated
+        with the original machine, not the delegated-to machine, therefore it
+        does not make sense to extract connection-related configuration for the
+        delegated-to machine from them.
+        """
+        task_vars = self._get_task_vars()
+        if self.delegate_to_hostname is None:
+            if key in task_vars:
+                return task_vars[key]
+        else:
+            delegated_vars = task_vars['ansible_delegated_vars']
+            if self.delegate_to_hostname in delegated_vars:
+                task_vars = delegated_vars[self.delegate_to_hostname]
+                if key in task_vars:
+                    return task_vars[key]
+
+        return default
+
+    @property
+    def homedir(self):
+        self._connect()
+        return self.init_child_result['home_dir']
+
+    def get_binding(self):
+        """
+        Return the :class:`ansible_mitogen.process.Binding` representing the
+        process that hosts the physical connection and services (context
+        establishment, file transfer, ..) for our desired target.
+        """
+        assert self.binding is not None
+        return self.binding
+
+    @property
+    def connected(self):
+        return self.context is not None
+
+    def _spec_from_via(self, proxied_inventory_name, via_spec):
+        """
+        Produce a dict connection specifiction given a string `via_spec`, of
+        the form `[[become_method:]become_user@]inventory_hostname`.
+        """
+        become_user, _, inventory_name = via_spec.rpartition('@')
+        become_method, _, become_user = become_user.rpartition(':')
+
+        # must use __contains__ to avoid a TypeError for a missing host on
+        # Ansible 2.3.
+        via_vars = self.get_host_vars(inventory_name)
+        if via_vars is None:
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.unknown_via_msg % (
+                    via_spec,
+                    proxied_inventory_name,
+                )
+            )
+
+        return ansible_mitogen.transport_config.MitogenViaSpec(
+            inventory_name=inventory_name,
+            play_context=self._play_context,
+            host_vars=dict(via_vars),  # TODO: make it lazy
+            become_method=become_method or None,
+            become_user=become_user or None,
+        )
+
+    unknown_via_msg = 'mitogen_via=%s of %s specifies an unknown hostname'
+    via_cycle_msg = 'mitogen_via=%s of %s creates a cycle (%s)'
+
+    def _stack_from_spec(self, spec, stack=(), seen_names=()):
+        """
+        Return a tuple of ContextService parameter dictionaries corresponding
+        to the connection described by `spec`, and any connection referenced by
+        its `mitogen_via` or `become` fields. Each element is a dict of the
+        form::
+
+            {
+                # Optional. If present and `True`, this hop is elegible for
+                # interpreter recycling.
+                "enable_lru": True,
+                # mitogen.master.Router method name.
+                "method": "ssh",
+                # mitogen.master.Router method kwargs.
+                "kwargs": {
+                    "hostname": "..."
+                }
+            }
+
+        :param ansible_mitogen.transport_config.Spec spec:
+            Connection specification.
+        :param tuple stack:
+            Stack elements from parent call (used for recursion).
+        :param tuple seen_names:
+            Inventory hostnames from parent call (cycle detection).
+        :returns:
+            Tuple `(stack, seen_names)`.
+        """
+        if spec.inventory_name() in seen_names:
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.via_cycle_msg % (
+                    spec.mitogen_via(),
+                    spec.inventory_name(),
+                    ' -> '.join(reversed(
+                        seen_names + (spec.inventory_name(),)
+                    )),
+                )
+            )
+
+        if spec.mitogen_via():
+            stack = self._stack_from_spec(
+                self._spec_from_via(spec.inventory_name(), spec.mitogen_via()),
+                stack=stack,
+                seen_names=seen_names + (spec.inventory_name(),),
+            )
+
+        stack += (CONNECTION_METHOD[spec.transport()](spec),)
+        if spec.become() and ((spec.become_user() != spec.remote_user()) or
+                              C.BECOME_ALLOW_SAME_USER):
+            stack += (CONNECTION_METHOD[spec.become_method()](spec),)
+
+        return stack
+
+    def _build_stack(self):
+        """
+        Construct a list of dictionaries representing the connection
+        configuration between the controller and the target. This is
+        additionally used by the integration tests "mitogen_get_stack" action
+        to fetch the would-be connection configuration.
+        """
+        spec = ansible_mitogen.transport_config.PlayContextSpec(
+            connection=self,
+            play_context=self._play_context,
+            transport=self.transport,
+            inventory_name=self.get_task_var('inventory_hostname'),
+        )
+        stack = self._stack_from_spec(spec)
+        return spec.inventory_name(), stack
+
+    def _connect_stack(self, stack):
+        """
+        Pass `stack` to ContextService, requesting a copy of the context object
+        representing the last tuple element. If no connection exists yet,
+        ContextService will recursively establish it before returning it or
+        throwing an error.
+
+        See :meth:`ansible_mitogen.services.ContextService.get` docstring for
+        description of the returned dictionary.
+        """
+        try:
+            dct = mitogen.service.call(
+                call_context=self.binding.get_service_context(),
+                service_name='ansible_mitogen.services.ContextService',
+                method_name='get',
+                stack=mitogen.utils.cast(list(stack)),
+            )
+        except mitogen.core.CallError:
+            LOG.warning('Connection failed; stack configuration was:\n%s',
+                        pprint.pformat(stack))
+            raise
+
+        if dct['msg']:
+            if dct['method_name'] in self.become_methods:
+                raise ansible.errors.AnsibleModuleError(dct['msg'])
+            raise ansible.errors.AnsibleConnectionFailure(dct['msg'])
+
+        self.context = dct['context']
+        self.chain = CallChain(self, self.context, pipelined=True)
+        if self._play_context.become:
+            self.login_context = dct['via']
+        else:
+            self.login_context = self.context
+
+        self.init_child_result = dct['init_child_result']
+
+    def get_good_temp_dir(self):
+        """
+        Return the 'good temporary directory' as discovered by
+        :func:`ansible_mitogen.target.init_child` immediately after
+        ContextService constructed the target context.
+        """
+        self._connect()
+        return self.init_child_result['good_temp_dir']
+
+    def _connect(self):
+        """
+        Establish a connection to the master process's UNIX listener socket,
+        constructing a mitogen.master.Router to communicate with the master,
+        and a mitogen.parent.Context to represent it.
+
+        Depending on the original transport we should emulate, trigger one of
+        the _connect_*() service calls defined above to cause the master
+        process to establish the real connection on our behalf, or return a
+        reference to the existing one.
+        """
+        if self.connected:
+            return
+
+        inventory_name, stack = self._build_stack()
+        worker_model = ansible_mitogen.process.get_worker_model()
+        self.binding = worker_model.get_binding(
+            mitogen.utils.cast(inventory_name)
+        )
+        self._connect_stack(stack)
+
+    def _put_connection(self):
+        """
+        Forget everything we know about the connected context. This function
+        cannot be called _reset() since that name is used as a public API by
+        Ansible 2.4 wait_for_connection plug-in.
+        """
+        if not self.context:
+            return
+
+        self.chain.reset()
+        mitogen.service.call(
+            call_context=self.binding.get_service_context(),
+            service_name='ansible_mitogen.services.ContextService',
+            method_name='put',
+            context=self.context
+        )
+
+        self.context = None
+        self.login_context = None
+        self.init_child_result = None
+        self.chain = None
+
+    def close(self):
+        """
+        Arrange for the mitogen.master.Router running in the worker to
+        gracefully shut down, and wait for shutdown to complete. Safe to call
+        multiple times.
+        """
+        self._put_connection()
+        if self.binding:
+            self.binding.close()
+            self.binding = None
+
+    reset_compat_msg = (
+        'Mitogen only supports "reset_connection" on Ansible 2.5.6 or later'
+    )
+
+    def reset(self):
+        """
+        Explicitly terminate the connection to the remote host. This discards
+        any local state we hold for the connection, returns the Connection to
+        the 'disconnected' state, and informs ContextService the connection is
+        bad somehow, and should be shut down and discarded.
+        """
+        if self._play_context.remote_addr is None:
+            # <2.5.6 incorrectly populate PlayContext for reset_connection
+            # https://github.com/ansible/ansible/issues/27520
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.reset_compat_msg
+            )
+
+        # Clear out state in case we were ever connected.
+        self.close()
+
+        inventory_name, stack = self._build_stack()
+        if self._play_context.become:
+            stack = stack[:-1]
+
+        worker_model = ansible_mitogen.process.get_worker_model()
+        binding = worker_model.get_binding(inventory_name)
+        try:
+            mitogen.service.call(
+                call_context=binding.get_service_context(),
+                service_name='ansible_mitogen.services.ContextService',
+                method_name='reset',
+                stack=mitogen.utils.cast(list(stack)),
+            )
+        finally:
+            binding.close()
+
+    # Compatibility with Ansible 2.4 wait_for_connection plug-in.
+    _reset = reset
+
+    def get_chain(self, use_login=False, use_fork=False):
+        """
+        Return the :class:`mitogen.parent.CallChain` to use for executing
+        function calls.
+
+        :param bool use_login:
+            If :data:`True`, always return the chain for the login account
+            rather than any active become user.
+        :param bool use_fork:
+            If :data:`True`, return the chain for the fork parent.
+        :returns mitogen.parent.CallChain:
+        """
+        self._connect()
+        if use_login:
+            return self.login_context.default_call_chain
+        # See FORK_SUPPORTED comments in target.py.
+        if use_fork and self.init_child_result['fork_context'] is not None:
+            return self.init_child_result['fork_context'].default_call_chain
+        return self.chain
+
+    def spawn_isolated_child(self):
+        """
+        Fork or launch a new child off the target context.
+
+        :returns:
+            mitogen.core.Context of the new child.
+        """
+        return self.get_chain(use_fork=True).call(
+            ansible_mitogen.target.spawn_isolated_child
+        )
+
+    def get_extra_args(self):
+        """
+        Overridden by connections/mitogen_kubectl.py to a list of additional
+        arguments for the command.
+        """
+        # TODO: maybe use this for SSH too.
+        return []  # Base implementation: no extra arguments.
+
+    def get_default_cwd(self):
+        """
+        Overridden by connections/mitogen_local.py to emulate behaviour of CWD
+        being fixed to that of ActionBase._loader.get_basedir().
+        """
+        return None  # Base implementation: no CWD override.
+
+    def get_default_env(self):
+        """
+        Overridden by connections/mitogen_local.py to emulate behaviour of
+        WorkProcess environment inherited from WorkerProcess.
+        """
+        return None  # Base implementation: no environment override.
+
+    def exec_command(self, cmd, in_data='', sudoable=True, mitogen_chdir=None):
+        """
+        Implement exec_command() by calling the corresponding
+        ansible_mitogen.target function in the target.
+
+        :param str cmd:
+            Shell command to execute.
+        :param bytes in_data:
+            Data to supply on ``stdin`` of the process.
+        :returns:
+            (return code, stdout bytes, stderr bytes)
+        """
+        emulate_tty = (not in_data and sudoable)  # A TTY is emulated only when no stdin data is supplied.
+        rc, stdout, stderr = self.get_chain().call(
+            ansible_mitogen.target.exec_command,
+            cmd=mitogen.utils.cast(cmd),
+            in_data=mitogen.utils.cast(in_data),
+            chdir=mitogen_chdir or self.get_default_cwd(),
+            emulate_tty=emulate_tty,
+        )
+
+        stderr += b'Shared connection to %s closed.%s' % (  # Mimic the ssh plug-in's trailer so caller expectations hold.
+            self._play_context.remote_addr.encode(),
+            (b'\r\n' if emulate_tty else b'\n'),
+        )
+        return rc, stdout, stderr
+
+    def fetch_file(self, in_path, out_path):
+        """
+        Implement fetch_file() by calling the corresponding
+        ansible_mitogen.target function in the target.
+
+        :param str in_path:
+            Remote filesystem path to read.
+        :param str out_path:
+            Local filesystem path to write.
+        """
+        self._connect()  # Ensure self.context exists before it is used below.
+        ansible_mitogen.target.transfer_file(
+            context=self.context,
+            # in_path may be AnsibleUnicode
+            in_path=mitogen.utils.cast(in_path),
+            out_path=out_path
+        )
+
+    def put_data(self, out_path, data, mode=None, utimes=None):
+        """
+        Implement put_file() by calling the corresponding ansible_mitogen.target
+        function in the target, transferring small files inline. This is
+        pipelined and will return immediately; failed transfers are reported as
+        exceptions in subsequent function calls.
+
+        :param str out_path:
+            Remote filesystem path to write.
+        :param bytes data:
+            File contents to put.
+        """
+        self.get_chain().call_no_reply(
+            ansible_mitogen.target.write_path,
+            mitogen.utils.cast(out_path),
+            mitogen.core.Blob(data),
+            mode=mode,
+            utimes=utimes,
+        )
+
+    #: Maximum size of a small file before switching to streaming
+    #: transfer. This should really be the same as
+    #: mitogen.services.FileService.IO_SIZE, however the message format has
+    #: slightly more overhead, so just randomly subtract 4KiB.
+    SMALL_FILE_LIMIT = mitogen.core.CHUNK_SIZE - 4096
+
+    def _throw_io_error(self, e, path):
+        if e.args[0] == errno.ENOENT:  # Only ENOENT is translated; callers re-raise anything else.
+            s = 'file or module does not exist: ' + path
+            raise ansible.errors.AnsibleFileNotFound(s)
+
+    def put_file(self, in_path, out_path):
+        """
+        Implement put_file() by streamily transferring the file via
+        FileService.
+
+        :param str in_path:
+            Local filesystem path to read.
+        :param str out_path:
+            Remote filesystem path to write.
+        """
+        try:
+            st = os.stat(in_path)
+        except OSError as e:
+            self._throw_io_error(e, in_path)
+            raise
+
+        if not stat.S_ISREG(st.st_mode):
+            raise IOError('%r is not a regular file.' % (in_path,))
+
+        # If the file is sufficiently small, just ship it in the argument list
+        # rather than introducing an extra RTT for the child to request it from
+        # FileService.
+        if st.st_size <= self.SMALL_FILE_LIMIT:
+            try:
+                fp = open(in_path, 'rb')
+                try:
+                    s = fp.read(self.SMALL_FILE_LIMIT + 1)
+                finally:
+                    fp.close()
+            except (IOError, OSError) as e:  # Bind e: it was unbound here, turning I/O errors into NameError.
+                self._throw_io_error(e, in_path)
+                raise
+
+            # Ensure did not grow during read.
+            if len(s) == st.st_size:
+                return self.put_data(out_path, s, mode=st.st_mode,
+                                     utimes=(st.st_atime, st.st_mtime))
+
+        self._connect()
+        mitogen.service.call(
+            call_context=self.binding.get_service_context(),
+            service_name='mitogen.service.FileService',
+            method_name='register',
+            path=mitogen.utils.cast(in_path)
+        )
+
+        # For now this must remain synchronous, as the action plug-in may have
+        # passed us a temporary file to transfer. A future FileService could
+        # maintain an LRU list of open file descriptors to keep the temporary
+        # file alive, but that requires more work.
+        self.get_chain().call(
+            ansible_mitogen.target.transfer_file,
+            context=self.binding.get_child_service_context(),
+            in_path=in_path,
+            out_path=out_path
+        )
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/loaders.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/loaders.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ce6b1fa96f00e5a091e8004ee14bb79baee028a
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/loaders.py
@@ -0,0 +1,62 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Stable names for PluginLoader instances across Ansible versions.
+"""
+
+from __future__ import absolute_import
+
+__all__ = [
+    'action_loader',
+    'connection_loader',
+    'module_loader',
+    'module_utils_loader',
+    'shell_loader',
+    'strategy_loader',
+]
+
+try:
+    from ansible.plugins.loader import action_loader
+    from ansible.plugins.loader import connection_loader
+    from ansible.plugins.loader import module_loader
+    from ansible.plugins.loader import module_utils_loader
+    from ansible.plugins.loader import shell_loader
+    from ansible.plugins.loader import strategy_loader
+except ImportError:  # Ansible <2.4
+    from ansible.plugins import action_loader
+    from ansible.plugins import connection_loader
+    from ansible.plugins import module_loader
+    from ansible.plugins import module_utils_loader
+    from ansible.plugins import shell_loader
+    from ansible.plugins import strategy_loader
+
+
+# Saved references to the original, unwrapped get() implementations.
+action_loader__get = action_loader.get
+connection_loader__get = connection_loader.get
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/logging.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..00a701842740ec847cabde0e6e32854a06e6e9af
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/logging.py
@@ -0,0 +1,128 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import logging
+import os
+
+import mitogen.core
+import mitogen.utils
+
+try:
+    from __main__ import display
+except ImportError:
+    from ansible.utils.display import Display
+    display = Display()
+
+
+#: The process name set via :func:`set_process_name`.
+_process_name = None
+
+#: The PID of the process that last called :func:`set_process_name`, so its
+#: value can be ignored in unknown fork children.
+_process_pid = None
+
+
+def set_process_name(name):
+    """
+    Set a name to adorn log messages with; the caller's PID is recorded too.
+    """
+    global _process_name
+    _process_name = name
+
+    global _process_pid
+    _process_pid = os.getpid()  # Remembered so unknown fork children can be detected.
+
+
+class Handler(logging.Handler):
+    """
+    Use Mitogen's log format, but send the result to a Display method.
+    """
+    def __init__(self, normal_method):
+        logging.Handler.__init__(self)
+        self.formatter = mitogen.utils.log_get_formatter()
+        self.normal_method = normal_method
+
+    #: Set of target loggers that produce warnings and errors that spam the
+    #: console needlessly. Their log level is forced to DEBUG. A better strategy
+    #: may simply be to bury all target logs in DEBUG output, but not by
+    #: overriding their log level as done here.
+    NOISY_LOGGERS = frozenset([
+        'dnf',   # issue #272; warns when a package is already installed.
+        'boto',  # issue #541; normal boto retry logic can cause ERROR logs.
+    ])
+
+    def emit(self, record):
+        mitogen_name = getattr(record, 'mitogen_name', '')
+        if mitogen_name == 'stderr':
+            record.levelno = logging.ERROR  # Treat target stderr output as errors.
+        if mitogen_name in self.NOISY_LOGGERS and record.levelno >= logging.WARNING:
+            record.levelno = logging.DEBUG
+
+        if _process_pid == os.getpid():
+            process_name = _process_name
+        else:
+            process_name = '?'  # Unknown fork child: set_process_name() ran in another PID.
+
+        s = '[%-4s %d] %s' % (process_name, os.getpid(), self.format(record))
+        if record.levelno >= logging.ERROR:
+            display.error(s, wrap_text=False)
+        elif record.levelno >= logging.WARNING:
+            display.warning(s, formatted=True)
+        else:
+            self.normal_method(s)
+
+
+def setup():
+    """
+    Install handlers for Mitogen loggers to redirect them into the Ansible
+    display framework. Ansible installs its own logging framework handlers when
+    C.DEFAULT_LOG_PATH is set, therefore disable propagation for our handlers.
+    """
+    l_mitogen = logging.getLogger('mitogen')
+    l_mitogen_io = logging.getLogger('mitogen.io')
+    l_ansible_mitogen = logging.getLogger('ansible_mitogen')
+    l_operon = logging.getLogger('operon')
+
+    for logger in l_mitogen, l_mitogen_io, l_ansible_mitogen, l_operon:
+        logger.handlers = [Handler(display.vvv)]
+        logger.propagate = False
+
+    if display.verbosity > 2:
+        l_ansible_mitogen.setLevel(logging.DEBUG)
+        l_mitogen.setLevel(logging.DEBUG)
+    else:
+        # Mitogen copies the active log level into new children, allowing them
+        # to filter tiny messages before they hit the network, and therefore
+        # before they wake the IO loop. Explicitly raising the level saves ~4%
+        # running against just the local machine.
+        l_mitogen.setLevel(logging.ERROR)
+        l_ansible_mitogen.setLevel(logging.ERROR)
+
+    if display.verbosity > 3:
+        l_mitogen_io.setLevel(logging.DEBUG)
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/mixins.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfdf838484be5e753f91af664394d37b53446441
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/mixins.py
@@ -0,0 +1,428 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import logging
+import os
+import pwd
+import random
+import traceback
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    from pipes import quote as shlex_quote
+
+from ansible.module_utils._text import to_bytes
+from ansible.parsing.utils.jsonify import jsonify
+
+import ansible
+import ansible.constants
+import ansible.plugins
+import ansible.plugins.action
+
+import mitogen.core
+import mitogen.select
+import mitogen.utils
+
+import ansible_mitogen.connection
+import ansible_mitogen.planner
+import ansible_mitogen.target
+from ansible.module_utils._text import to_text
+
+try:
+    from ansible.utils.unsafe_proxy import wrap_var
+except ImportError:
+    from ansible.vars.unsafe_proxy import wrap_var
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ActionModuleMixin(ansible.plugins.action.ActionBase):
+    """
+    The Mitogen-patched PluginLoader dynamically mixes this into every action
+    class that Ansible attempts to load. It exists to override all the
+    assumptions built into the base action class that should really belong in
+    some middle layer, or at least in the connection layer.
+
+    Functionality is defined here for:
+
+    * Capturing the final set of task variables and giving Connection a chance
+      to update its idea of the correct execution environment, before any
+      attempt is made to call a Connection method. While it's not expected for
+      the interpreter to change on a per-task basis, Ansible permits this, and
+      so it must be supported.
+
+    * Overriding lots of methods that try to call out to shell for mundane
+      reasons, such as copying files around, changing file permissions,
+      creating temporary directories and suchlike.
+
+    * Short-circuiting any use of Ansiballz or related code for executing a
+      module remotely using shell commands and SSH.
+
+    * Short-circuiting most of the logic in dealing with the fact that Ansible
+      always runs become: tasks across at least the SSH user account and the
+      destination user account, and handling the security permission issues
+      that crop up due to this. Mitogen always runs a task completely within
+      the target user account, so it's not a problem for us.
+    """
+    def __init__(self, task, connection, *args, **kwargs):
+        """
+        Verify the received connection is really a Mitogen connection. If not,
+        transmute this instance back into the original unadorned base class.
+
+        This allows running the Mitogen strategy in mixed-target playbooks,
+        where some targets use SSH while others use WinRM or some fancier UNIX
+        connection plug-in. That's because when the Mitogen strategy is active,
+        ActionModuleMixin is unconditionally mixed into any action module that
+        is instantiated, and there is no direct way for the monkey-patch to
+        know what kind of connection will be used upfront.
+        """
+        super(ActionModuleMixin, self).__init__(task, connection, *args, **kwargs)
+        if not isinstance(connection, ansible_mitogen.connection.Connection):
+            _, self.__class__ = type(self).__bases__  # Demote to the second base: the original action class.
+
+    def run(self, tmp=None, task_vars=None):
+        """
+        Override run() to notify Connection of task-specific data, so it has a
+        chance to know e.g. the Python interpreter in use.
+        """
+        self._connection.on_action_run(
+            task_vars=task_vars,
+            delegate_to_hostname=self._task.delegate_to,
+            loader_basedir=self._loader.get_basedir(),
+        )
+        return super(ActionModuleMixin, self).run(tmp, task_vars)
+
+    COMMAND_RESULT = {  # Template for a successful no-op shell result.
+        'rc': 0,
+        'stdout': '',
+        'stdout_lines': [],
+        'stderr': ''
+    }
+
+    def fake_shell(self, func, stdout=False):
+        """
+        Execute a function and decorate its return value in the style of
+        _low_level_execute_command(). This produces a return value that looks
+        like some shell command was run, when really func() was implemented
+        entirely in Python.
+
+        If the function raises :py:class:`mitogen.core.CallError`, this will be
+        translated into a failed shell command with a non-zero exit status.
+
+        :param func:
+            Function invoked as `func()`.
+        :returns:
+            See :py:attr:`COMMAND_RESULT`.
+        """
+        dct = self.COMMAND_RESULT.copy()
+        try:
+            rc = func()
+            if stdout:
+                dct['stdout'] = repr(rc)  # repr(): rc may be any Python object.
+        except mitogen.core.CallError:
+            LOG.exception('While emulating a shell command')
+            dct['rc'] = 1
+            dct['stderr'] = traceback.format_exc()
+
+        return dct
+
+    def _remote_file_exists(self, path):
+        """
+        Determine if `path` exists by directly invoking os.path.exists() in the
+        target user account.
+        """
+        LOG.debug('_remote_file_exists(%r)', path)
+        return self._connection.get_chain().call(
+            ansible_mitogen.target.file_exists,
+            mitogen.utils.cast(path)
+        )
+
+    def _configure_module(self, module_name, module_args, task_vars=None):
+        """
+        Mitogen does not use the Ansiballz framework. This call should never
+        happen when ActionMixin is active, so crash if it does.
+        """
+        assert False, "_configure_module() should never be called."
+
+    def _is_pipelining_enabled(self, module_style, wrap_async=False):
+        """
+        Mitogen does not use SSH pipelining. This call should never happen when
+        ActionMixin is active, so crash if it does.
+        """
+        assert False, "_is_pipelining_enabled() should never be called."
+
+    def _generate_tmp_path(self):
+        return os.path.join(
+            self._connection.get_good_temp_dir(),
+            'ansible_mitogen_action_%016x' % (
+                random.getrandbits(8*8),  # 64 random bits -> 16 hex digits via %016x.
+            )
+        )
+
+    def _make_tmp_path(self, remote_user=None):
+        """
+        Create a temporary subdirectory as a child of the temporary directory
+        managed by the remote interpreter.
+        """
+        LOG.debug('_make_tmp_path(remote_user=%r)', remote_user)
+        path = self._generate_tmp_path()
+        LOG.debug('Temporary directory: %r', path)
+        self._connection.get_chain().call_no_reply(os.mkdir, path)  # Pipelined; errors surface on a later call.
+        self._connection._shell.tmpdir = path
+        return path
+
+    def _remove_tmp_path(self, tmp_path):
+        """
+        Replace the base implementation's invocation of rm -rf, replacing it
+        with a pipelined call to :func:`ansible_mitogen.target.prune_tree`.
+        """
+        LOG.debug('_remove_tmp_path(%r)', tmp_path)
+        if tmp_path is None and ansible.__version__ > '2.6':  # NOTE(review): lexicographic compare — '2.10' sorts before '2.6'.
+            tmp_path = self._connection._shell.tmpdir  # 06f73ad578d
+        if tmp_path is not None:
+            self._connection.get_chain().call_no_reply(
+                ansible_mitogen.target.prune_tree,
+                tmp_path,
+            )
+        self._connection._shell.tmpdir = None
+
+    def _transfer_data(self, remote_path, data):
+        """
+        Used by the base _execute_module(), and in <2.4 also by the template
+        action module, and probably others.
+        """
+        if isinstance(data, dict):
+            data = jsonify(data)
+        if not isinstance(data, bytes):
+            data = to_bytes(data, errors='surrogate_or_strict')
+
+        LOG.debug('_transfer_data(%r, %s ..%d bytes)',
+                  remote_path, type(data), len(data))
+        self._connection.put_data(remote_path, data)
+        return remote_path
+
+    #: Actions listed here cause :func:`_fixup_perms2` to avoid a needless
+    #: roundtrip, as they modify file modes separately afterwards. This is due
+    #: to the method prototype having a default of `execute=True`.
+    FIXUP_PERMS_RED_HERRING = set(['copy'])
+
+    def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
+        """
+        Mitogen always executes ActionBase helper methods in the context of the
+        target user account, so it is never necessary to modify permissions
+        except to ensure the execute bit is set if requested.
+        """
+        LOG.debug('_fixup_perms2(%r, remote_user=%r, execute=%r)',
+                  remote_paths, remote_user, execute)
+        if execute and self._task.action not in self.FIXUP_PERMS_RED_HERRING:
+            return self._remote_chmod(remote_paths, mode='u+x')
+        return self.COMMAND_RESULT.copy()
+
+    def _remote_chmod(self, paths, mode, sudoable=False):
+        """
+        Issue an asynchronous set_file_mode() call for every path in `paths`,
+        then format the resulting return value list with fake_shell().
+        """
+        LOG.debug('_remote_chmod(%r, mode=%r, sudoable=%r)',
+                  paths, mode, sudoable)
+        return self.fake_shell(lambda: mitogen.select.Select.all(
+            self._connection.get_chain().call_async(
+                ansible_mitogen.target.set_file_mode, path, mode
+            )
+            for path in paths
+        ))
+
+    def _remote_chown(self, paths, user, sudoable=False):
+        """
+        Issue an asynchronous os.chown() call for every path in `paths`, then
+        format the resulting return value list with fake_shell().
+        """
+        LOG.debug('_remote_chown(%r, user=%r, sudoable=%r)',
+                  paths, user, sudoable)
+        ent = self._connection.get_chain().call(pwd.getpwnam, user)  # Resolve uid/gid on the target, not the controller.
+        return self.fake_shell(lambda: mitogen.select.Select.all(
+            self._connection.get_chain().call_async(
+                os.chown, path, ent.pw_uid, ent.pw_gid
+            )
+            for path in paths
+        ))
+
+    def _remote_expand_user(self, path, sudoable=True):
+        """
+        Replace the base implementation's attempt to emulate
+        os.path.expanduser() with an actual call to os.path.expanduser().
+
+        :param bool sudoable:
+            If :data:`True`, indicate unqualified tilde ("~" with no username)
+            should be evaluated in the context of the login account, not any
+            become_user.
+        """
+        LOG.debug('_remote_expand_user(%r, sudoable=%r)', path, sudoable)
+        if not path.startswith('~'):
+            # /home/foo -> /home/foo
+            return path
+        if sudoable or not self._play_context.become:
+            if path == '~':
+                # ~ -> /home/dmw
+                return self._connection.homedir
+            if path.startswith('~/'):
+                # ~/.ansible -> /home/dmw/.ansible
+                return os.path.join(self._connection.homedir, path[2:])
+        # ~root/.ansible -> /root/.ansible
+        return self._connection.get_chain(use_login=(not sudoable)).call(
+            os.path.expanduser,
+            mitogen.utils.cast(path),
+        )
+
+    def get_task_timeout_secs(self):
+        """
+        Return the task "async:" value, portable across 2.4-2.5.
+        """
+        try:
+            return self._task.async_val
+        except AttributeError:
+            return getattr(self._task, 'async')  # 'async' is a keyword in Python 3.7+, hence getattr.
+
+    def _set_temp_file_args(self, module_args, wrap_async):
+        # Ansible>2.5 module_utils reuses the action's temporary directory if
+        # one exists. Older versions error if this key is present.
+        if ansible.__version__ > '2.5':  # NOTE(review): lexicographic compare — wrong for '2.10'+.
+            if wrap_async:
+                # Sharing is not possible with async tasks, as in that case,
+                # the directory must outlive the action plug-in.
+                module_args['_ansible_tmpdir'] = None
+            else:
+                module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
+
+        # If _ansible_tmpdir is unset, Ansible>2.6 module_utils will use
+        # _ansible_remote_tmp as the location to create the module's temporary
+        # directory. Older versions error if this key is present.
+        if ansible.__version__ > '2.6':
+            module_args['_ansible_remote_tmp'] = (
+                self._connection.get_good_temp_dir()
+            )
+
+    def _execute_module(self, module_name=None, module_args=None, tmp=None,
+                        task_vars=None, persist_files=False,
+                        delete_remote_tmp=True, wrap_async=False):
+        """
+        Collect up a module's execution environment then use it to invoke
+        target.run_module() or helpers.run_module_async() in the target
+        context.
+        """
+        if module_name is None:
+            module_name = self._task.action
+        if module_args is None:
+            module_args = self._task.args
+        if task_vars is None:
+            task_vars = {}
+
+        self._update_module_args(module_name, module_args, task_vars)
+        env = {}
+        self._compute_environment_string(env)  # Populates env in place.
+        self._set_temp_file_args(module_args, wrap_async)
+
+        self._connection._connect()
+        result = ansible_mitogen.planner.invoke(
+            ansible_mitogen.planner.Invocation(
+                action=self,
+                connection=self._connection,
+                module_name=mitogen.core.to_text(module_name),
+                module_args=mitogen.utils.cast(module_args),
+                task_vars=task_vars,
+                templar=self._templar,
+                env=mitogen.utils.cast(env),
+                wrap_async=wrap_async,
+                timeout_secs=self.get_task_timeout_secs(),
+            )
+        )
+
+        if tmp and ansible.__version__ < '2.5' and delete_remote_tmp:
+            # Built-in actions expected tmpdir to be cleaned up automatically
+            # on _execute_module().
+            self._remove_tmp_path(tmp)
+
+        return wrap_var(result)  # Mark the result unsafe for later templating.
+
+    def _postprocess_response(self, result):
+        """
+        Apply fixups mimicking ActionBase._execute_module(); this is copied
+        verbatim from action/__init__.py, the guts of _parse_returned_data are
+        garbage and should be removed or reimplemented once tests exist.
+
+        :param dict result:
+            Dictionary with format::
+
+                {
+                    "rc": int,
+                    "stdout": "stdout data",
+                    "stderr": "stderr data"
+                }
+        """
+        data = self._parse_returned_data(result)
+
+        # Cutpasted from the base implementation.
+        if 'stdout' in data and 'stdout_lines' not in data:
+            data['stdout_lines'] = (data['stdout'] or u'').splitlines()
+        if 'stderr' in data and 'stderr_lines' not in data:
+            data['stderr_lines'] = (data['stderr'] or u'').splitlines()
+
+        return data
+
+    def _low_level_execute_command(self, cmd, sudoable=True, in_data=None,
+                                   executable=None,
+                                   encoding_errors='surrogate_then_replace',
+                                   chdir=None):
+        """
+        Override the base implementation by simply calling
+        target.exec_command() in the target context.
+        """
+        LOG.debug('_low_level_execute_command(%r, in_data=%r, exe=%r, dir=%r)',
+                  cmd, type(in_data), executable, chdir)
+        if executable is None:  # executable defaults to False
+            executable = self._play_context.executable
+        if executable:
+            cmd = executable + ' -c ' + shlex_quote(cmd)
+
+        rc, stdout, stderr = self._connection.exec_command(
+            cmd=cmd,
+            in_data=in_data,
+            sudoable=sudoable,
+            mitogen_chdir=chdir,
+        )
+        stdout_text = to_text(stdout, errors=encoding_errors)
+
+        return {
+            'rc': rc,
+            'stdout': stdout_text,
+            'stdout_lines': stdout_text.splitlines(),
+            'stderr': stderr,  # NOTE(review): left as bytes, unlike decoded stdout — confirm callers cope.
+        }
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/module_finder.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/module_finder.py
new file mode 100644
index 0000000000000000000000000000000000000000..89aa2beba6667b3c4c8e6457b95b6a35aaca6091
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/module_finder.py
@@ -0,0 +1,157 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import collections
+import imp
+import os
+
+import mitogen.master
+
+
+PREFIX = 'ansible.module_utils.'
+
+
+Module = collections.namedtuple('Module', 'name path kind parent')
+
+
+def get_fullname(module):
+    """
+    Reconstruct a Module's canonical path by recursing through its parents.
+    """
+    bits = [str(module.name)]
+    while module.parent:
+        bits.append(str(module.parent.name))
+        module = module.parent
+    return '.'.join(reversed(bits))
+
+
+def get_code(module):
+    """
+    Compile and return a Module's code object.
+    """
+    fp = open(module.path, 'rb')
+    try:
+        return compile(fp.read(), str(module.name), 'exec')
+    finally:
+        fp.close()
+
+
+def is_pkg(module):
+    """
+    Return :data:`True` if a Module represents a package.
+    """
+    return module.kind == imp.PKG_DIRECTORY
+
+
+def find(name, path=(), parent=None):
+    """
+    Return a Module instance describing the first matching module found on the
+    search path.
+
+    :param str name:
+        Module name.
+    :param list path:
+        List of directory names to search for the module.
+    :param Module parent:
+        Optional module parent.
+    """
+    assert isinstance(path, tuple)
+    head, _, tail = name.partition('.')
+    try:
+        tup = imp.find_module(head, list(path))
+    except ImportError:
+        return parent
+
+    fp, modpath, (suffix, mode, kind) = tup
+    if fp:
+        fp.close()
+
+    if parent and modpath == parent.path:
+        # 'from timeout import timeout', where 'timeout' is a function but also
+        # the name of the module being imported.
+        return None
+
+    if kind == imp.PKG_DIRECTORY:
+        modpath = os.path.join(modpath, '__init__.py')
+
+    module = Module(head, modpath, kind, parent)
+    # TODO: this code is entirely wrong on Python 3.x, but works well enough
+    # for Ansible. We need a new find_child() that only looks in the package
+    # directory, never falling back to the parent search path.
+    if tail and kind == imp.PKG_DIRECTORY:
+        return find_relative(module, tail, path)
+    return module
+
+
+def find_relative(parent, name, path=()):
+    if parent.kind == imp.PKG_DIRECTORY:
+        path = (os.path.dirname(parent.path),) + path
+    return find(name, path, parent=parent)
+
+
+def scan_fromlist(code):
+    for level, modname_s, fromlist in mitogen.master.scan_code_imports(code):
+        for name in fromlist:
+            yield level, '%s.%s' % (modname_s, name)
+        if not fromlist:
+            yield level, modname_s
+
+
+def scan(module_name, module_path, search_path):
+    module = Module(module_name, module_path, imp.PY_SOURCE, None)
+    stack = [module]
+    seen = set()
+
+    while stack:
+        module = stack.pop(0)
+        for level, fromname in scan_fromlist(get_code(module)):
+            if not fromname.startswith(PREFIX):
+                continue
+
+            imported = find(fromname[len(PREFIX):], search_path)
+            if imported is None or imported in seen:
+                continue
+
+            seen.add(imported)
+            stack.append(imported)
+            parent = imported.parent
+            while parent:
+                fullname = get_fullname(parent)
+                module = Module(fullname, parent.path, parent.kind, None)
+                if module not in seen:
+                    seen.add(module)
+                    stack.append(module)
+                parent = parent.parent
+
+    return sorted(
+        (PREFIX + get_fullname(module), module.path, is_pkg(module))
+        for module in seen
+    )
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/parsing.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/parsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..27fca7cd6793c81b394916b55b562637585a4db6
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/parsing.py
@@ -0,0 +1,76 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import mitogen.core
+
+
+def parse_script_interpreter(source):
+    """
+    Parse the script interpreter portion of a UNIX hashbang using the rules
+    Linux uses.
+
+    :param str source: String like "/usr/bin/env python".
+
+    :returns:
+        Tuple of `(interpreter, arg)`, where `interpreter` is the script
+        interpreter and `arg` is its sole argument if present, otherwise
+        :py:data:`None`.
+    """
+    # Find terminating newline. Assume last byte of binprm_buf if absent.
+    nl = source.find(b'\n', 0, 128)
+    if nl == -1:
+        nl = min(128, len(source))
+
+    # Split once on the first run of whitespace. If no whitespace exists,
+    # bits just contains the interpreter filename.
+    bits = source[0:nl].strip().split(None, 1)
+    if len(bits) == 1:
+        return mitogen.core.to_text(bits[0]), None
+    return mitogen.core.to_text(bits[0]), mitogen.core.to_text(bits[1])
+
+
+def parse_hashbang(source):
+    """
+    Parse a UNIX "hashbang line" using the syntax supported by Linux.
+
+    :param str source: String like "#!/usr/bin/env python".
+
+    :returns:
+        Tuple of `(interpreter, arg)`, where `interpreter` is the script
+        interpreter and `arg` is its sole argument if present, otherwise
+        :py:data:`None`.
+    """
+    # Linux requires first 2 bytes with no whitespace, pretty sure it's the
+    # same everywhere. See binfmt_script.c.
+    if not source.startswith(b'#!'):
+        return None, None
+
+    return parse_script_interpreter(source[2:])
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/planner.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/planner.py
new file mode 100644
index 0000000000000000000000000000000000000000..8febbdb32ff38848f8fe847b00da4b00f0cdbf02
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/planner.py
@@ -0,0 +1,576 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Classes to detect each case from [0] and prepare arguments necessary for the
+corresponding Runner class within the target, including preloading requisite
+files/modules known missing.
+
+[0] "Ansible Module Architecture", developing_program_flow_modules.html
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import logging
+import os
+import random
+
+from ansible.executor import module_common
+import ansible.errors
+import ansible.module_utils
+import ansible.release
+import mitogen.core
+import mitogen.select
+
+import ansible_mitogen.loaders
+import ansible_mitogen.parsing
+import ansible_mitogen.target
+
+
+LOG = logging.getLogger(__name__)
+NO_METHOD_MSG = 'Mitogen: no invocation method found for: '
+NO_INTERPRETER_MSG = 'module (%s) is missing interpreter line'
+NO_MODULE_MSG = 'The module %s was not found in configured module paths.'
+
+_planner_by_path = {}
+
+
+class Invocation(object):
+    """
+    Collect up a module's execution environment then use it to invoke
+    target.run_module() or helpers.run_module_async() in the target context.
+    """
+    def __init__(self, action, connection, module_name, module_args,
+                 task_vars, templar, env, wrap_async, timeout_secs):
+        #: ActionBase instance invoking the module. Required to access some
+        #: output postprocessing methods that don't belong in ActionBase at
+        #: all.
+        self.action = action
+        #: Ansible connection to use to contact the target. Must be an
+        #: ansible_mitogen connection.
+        self.connection = connection
+        #: Name of the module ('command', 'shell', etc.) to execute.
+        self.module_name = module_name
+        #: Final module arguments.
+        self.module_args = module_args
+        #: Task variables, needed to extract ansible_*_interpreter.
+        self.task_vars = task_vars
+        #: Templar, needed to extract ansible_*_interpreter.
+        self.templar = templar
+        #: Final module environment.
+        self.env = env
+        #: Boolean, if :py:data:`True`, launch the module asynchronously.
+        self.wrap_async = wrap_async
+        #: Integer, if >0, limit the time an asynchronous job may run for.
+        self.timeout_secs = timeout_secs
+        #: Initially ``None``, but set by :func:`invoke`. The path on the
+        #: master to the module's implementation file.
+        self.module_path = None
+        #: Initially ``None``, but set by :func:`invoke`. The raw source or
+        #: binary contents of the module.
+        self._module_source = None
+
+    def get_module_source(self):
+        if self._module_source is None:
+            self._module_source = read_file(self.module_path)
+        return self._module_source
+
+    def __repr__(self):
+        return 'Invocation(module_name=%s)' % (self.module_name,)
+
+
+class Planner(object):
+    """
+    A Planner receives a module name and the contents of its implementation
+    file, indicates whether or not it understands how to run the module, and
+    exports a method to run the module.
+    """
+    def __init__(self, invocation):
+        self._inv = invocation
+
+    @classmethod
+    def detect(cls, path, source):
+        """
+        Return true if the supplied `invocation` matches the module type
+        implemented by this planner.
+        """
+        raise NotImplementedError()
+
+    def should_fork(self):
+        """
+        Asynchronous tasks must always be forked.
+        """
+        return self._inv.wrap_async
+
+    def get_push_files(self):
+        """
+        Return a list of files that should be propagated to the target context
+        using PushFileService. The default implementation pushes nothing.
+        """
+        return []
+
+    def get_module_deps(self):
+        """
+        Return a list of the Python module names imported by the module.
+        """
+        return []
+
+    def get_kwargs(self, **kwargs):
+        """
+        If :meth:`detect` returned :data:`True`, plan for the module's
+        execution, including granting access to or delivering any files to it
+        that are known to be absent, and finally return a dict::
+
+            {
+                # Name of the class from runners.py that implements the
+                # target-side execution of this module type.
+                "runner_name": "...",
+
+                # Remaining keys are passed to the constructor of the class
+                # named by `runner_name`.
+            }
+        """
+        binding = self._inv.connection.get_binding()
+
+        new = dict((mitogen.core.UnicodeType(k), kwargs[k])
+                   for k in kwargs)
+        new.setdefault('good_temp_dir',
+            self._inv.connection.get_good_temp_dir())
+        new.setdefault('cwd', self._inv.connection.get_default_cwd())
+        new.setdefault('extra_env', self._inv.connection.get_default_env())
+        new.setdefault('emulate_tty', True)
+        new.setdefault('service_context', binding.get_child_service_context())
+        return new
+
+    def __repr__(self):
+        return '%s()' % (type(self).__name__,)
+
+
+class BinaryPlanner(Planner):
+    """
+    Binary modules take their arguments and will return data to Ansible in the
+    same way as want JSON modules.
+    """
+    runner_name = 'BinaryRunner'
+
+    @classmethod
+    def detect(cls, path, source):
+        return module_common._is_binary(source)
+
+    def get_push_files(self):
+        return [mitogen.core.to_text(self._inv.module_path)]
+
+    def get_kwargs(self, **kwargs):
+        return super(BinaryPlanner, self).get_kwargs(
+            runner_name=self.runner_name,
+            module=self._inv.module_name,
+            path=self._inv.module_path,
+            json_args=json.dumps(self._inv.module_args),
+            env=self._inv.env,
+            **kwargs
+        )
+
+
+class ScriptPlanner(BinaryPlanner):
+    """
+    Common functionality for script module planners -- handle interpreter
+    detection and rewrite.
+    """
+    def _rewrite_interpreter(self, path):
+        """
+        Given the original interpreter binary extracted from the script's
+        interpreter line, look up the associated `ansible_*_interpreter`
+        variable, render it and return it.
+
+        :param str path:
+            Absolute UNIX path to original interpreter.
+
+        :returns:
+            Shell fragment prefix used to execute the script via "/bin/sh -c".
+            While `ansible_*_interpreter` documentation suggests shell isn't
+            involved here, the vanilla implementation uses it and that use is
+            exploited in common playbooks.
+        """
+        key = u'ansible_%s_interpreter' % os.path.basename(path).strip()
+        try:
+            template = self._inv.task_vars[key]
+        except KeyError:
+            return path
+
+        return mitogen.utils.cast(self._inv.templar.template(template))
+
+    def _get_interpreter(self):
+        path, arg = ansible_mitogen.parsing.parse_hashbang(
+            self._inv.get_module_source()
+        )
+        if path is None:
+            raise ansible.errors.AnsibleError(NO_INTERPRETER_MSG % (
+                self._inv.module_name,
+            ))
+
+        fragment = self._rewrite_interpreter(path)
+        if arg:
+            fragment += ' ' + arg
+
+        return fragment, path.startswith('python')
+
+    def get_kwargs(self, **kwargs):
+        interpreter_fragment, is_python = self._get_interpreter()
+        return super(ScriptPlanner, self).get_kwargs(
+            interpreter_fragment=interpreter_fragment,
+            is_python=is_python,
+            **kwargs
+        )
+
+
+class JsonArgsPlanner(ScriptPlanner):
+    """
+    Script that has its interpreter directive and the task arguments
+    substituted into its source as a JSON string.
+    """
+    runner_name = 'JsonArgsRunner'
+
+    @classmethod
+    def detect(cls, path, source):
+        return module_common.REPLACER_JSONARGS in source
+
+
+class WantJsonPlanner(ScriptPlanner):
+    """
+    If a module has the string WANT_JSON in it anywhere, Ansible treats it as a
+    non-native module that accepts a filename as its only command line
+    parameter. The filename is for a temporary file containing a JSON string
+    containing the module's parameters. The module needs to open the file, read
+    and parse the parameters, operate on the data, and print its return data as
+    a JSON encoded dictionary to stdout before exiting.
+
+    These types of modules are self-contained entities. As of Ansible 2.1,
+    Ansible only modifies them to change a shebang line if present.
+    """
+    runner_name = 'WantJsonRunner'
+
+    @classmethod
+    def detect(cls, path, source):
+        return b'WANT_JSON' in source
+
+
+class NewStylePlanner(ScriptPlanner):
+    """
+    The Ansiballz framework differs from module replacer in that it uses real
+    Python imports of things in ansible/module_utils instead of merely
+    preprocessing the module.
+    """
+    runner_name = 'NewStyleRunner'
+    marker = b'from ansible.module_utils.'
+
+    @classmethod
+    def detect(cls, path, source):
+        return cls.marker in source
+
+    def _get_interpreter(self):
+        return None, None
+
+    def get_push_files(self):
+        return super(NewStylePlanner, self).get_push_files() + [
+            mitogen.core.to_text(path)
+            for fullname, path, is_pkg in self.get_module_map()['custom']
+        ]
+
+    def get_module_deps(self):
+        return self.get_module_map()['builtin']
+
+    #: Module names appearing in this set always require forking, usually due
+    #: to some terminal leakage that cannot be worked around in any sane
+    #: manner.
+    ALWAYS_FORK_MODULES = frozenset([
+        'dnf',  # issue #280; py-dnf/hawkey need therapy
+        'firewalld',  # issue #570: ansible module_utils caches dbus conn
+    ])
+
+    def should_fork(self):
+        """
+        In addition to asynchronous tasks, new-style modules should be forked
+        if:
+
+        * the user specifies mitogen_task_isolation=fork, or
+        * the new-style module has a custom module search path, or
+        * the module is known to leak like a sieve.
+        """
+        return (
+            super(NewStylePlanner, self).should_fork() or
+            (self._inv.task_vars.get('mitogen_task_isolation') == 'fork') or
+            (self._inv.module_name in self.ALWAYS_FORK_MODULES) or
+            (len(self.get_module_map()['custom']) > 0)
+        )
+
+    def get_search_path(self):
+        return tuple(
+            path
+            for path in ansible_mitogen.loaders.module_utils_loader._get_paths(
+                subdirs=False
+            )
+        )
+
+    _module_map = None
+
+    def get_module_map(self):
+        if self._module_map is None:
+            binding = self._inv.connection.get_binding()
+            self._module_map = mitogen.service.call(
+                call_context=binding.get_service_context(),
+                service_name='ansible_mitogen.services.ModuleDepService',
+                method_name='scan',
+
+                module_name='ansible_module_%s' % (self._inv.module_name,),
+                module_path=self._inv.module_path,
+                search_path=self.get_search_path(),
+                builtin_path=module_common._MODULE_UTILS_PATH,
+                context=self._inv.connection.context,
+            )
+        return self._module_map
+
+    def get_kwargs(self):
+        return super(NewStylePlanner, self).get_kwargs(
+            module_map=self.get_module_map(),
+            py_module_name=py_modname_from_path(
+                self._inv.module_name,
+                self._inv.module_path,
+            ),
+        )
+
+
+class ReplacerPlanner(NewStylePlanner):
+    """
+    The Module Replacer framework is the original framework implementing
+    new-style modules. It is essentially a preprocessor (like the C
+    Preprocessor for those familiar with that programming language). It does
+    straight substitutions of specific substring patterns in the module file.
+    There are two types of substitutions.
+
+    * Replacements that only happen in the module file. These are public
+      replacement strings that modules can utilize to get helpful boilerplate
+      or access to arguments.
+
+      "from ansible.module_utils.MOD_LIB_NAME import *" is replaced with the
+      contents of the ansible/module_utils/MOD_LIB_NAME.py. These should only
+      be used with new-style Python modules.
+
+      "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>" is equivalent to
+      "from ansible.module_utils.basic import *" and should also only apply to
+      new-style Python modules.
+
+      "# POWERSHELL_COMMON" substitutes the contents of
+      "ansible/module_utils/powershell.ps1". It should only be used with
+      new-style Powershell modules.
+    """
+    runner_name = 'ReplacerRunner'
+
+    @classmethod
+    def detect(cls, path, source):
+        return module_common.REPLACER in source
+
+
+class OldStylePlanner(ScriptPlanner):
+    runner_name = 'OldStyleRunner'
+
+    @classmethod
+    def detect(cls, path, source):
+        # Everything else.
+        return True
+
+
+_planners = [
+    BinaryPlanner,
+    # ReplacerPlanner,
+    NewStylePlanner,
+    JsonArgsPlanner,
+    WantJsonPlanner,
+    OldStylePlanner,
+]
+
+
+try:
+    _get_ansible_module_fqn = module_common._get_ansible_module_fqn
+except AttributeError:
+    _get_ansible_module_fqn = None
+
+
+def py_modname_from_path(name, path):
+    """
+    Fetch the logical name of a new-style module as it might appear in
+    :data:`sys.modules` of the target's Python interpreter.
+
+    * For Ansible <2.7, this is an unpackaged module named like
+      "ansible_module_%s".
+
+    * For Ansible <2.9, this is an unpackaged module named like
+      "ansible.modules.%s"
+
+    * Since Ansible 2.9, modules appearing within a package have the original
+      package hierarchy approximated on the target, enabling relative imports
+      to function correctly. For example, "ansible.modules.system.setup".
+    """
+    # 2.9+
+    if _get_ansible_module_fqn:
+        try:
+            return _get_ansible_module_fqn(path)
+        except ValueError:
+            pass
+
+    if ansible.__version__ < '2.7':
+        return 'ansible_module_' + name
+
+    return 'ansible.modules.' + name
+
+
+def read_file(path):
+    fd = os.open(path, os.O_RDONLY)
+    try:
+        bits = []
+        chunk = True
+        while True:
+            chunk = os.read(fd, 65536)
+            if not chunk:
+                break
+            bits.append(chunk)
+    finally:
+        os.close(fd)
+
+    return mitogen.core.b('').join(bits)
+
+
+def _propagate_deps(invocation, planner, context):
+    binding = invocation.connection.get_binding()
+    mitogen.service.call(
+        call_context=binding.get_service_context(),
+        service_name='mitogen.service.PushFileService',
+        method_name='propagate_paths_and_modules',
+
+        context=context,
+        paths=planner.get_push_files(),
+        modules=planner.get_module_deps(),
+    )
+
+
+def _invoke_async_task(invocation, planner):
+    job_id = '%016x' % random.randint(0, 2**64)
+    context = invocation.connection.spawn_isolated_child()
+    _propagate_deps(invocation, planner, context)
+
+    with mitogen.core.Receiver(context.router) as started_recv:
+        call_recv = context.call_async(
+            ansible_mitogen.target.run_module_async,
+            job_id=job_id,
+            timeout_secs=invocation.timeout_secs,
+            started_sender=started_recv.to_sender(),
+            kwargs=planner.get_kwargs(),
+        )
+
+        # Wait for run_module_async() to crash, or for AsyncRunner to indicate
+        # the job file has been written.
+        for msg in mitogen.select.Select([started_recv, call_recv]):
+            if msg.receiver is call_recv:
+                # It can only be an exception.
+                raise msg.unpickle()
+            break
+
+        return {
+            'stdout': json.dumps({
+                # modules/utilities/logic/async_wrapper.py::_run_module().
+                'changed': True,
+                'started': 1,
+                'finished': 0,
+                'ansible_job_id': job_id,
+            })
+        }
+
+
+def _invoke_isolated_task(invocation, planner):
+    context = invocation.connection.spawn_isolated_child()
+    _propagate_deps(invocation, planner, context)
+    try:
+        return context.call(
+            ansible_mitogen.target.run_module,
+            kwargs=planner.get_kwargs(),
+        )
+    finally:
+        context.shutdown()
+
+
+def _get_planner(name, path, source):
+    for klass in _planners:
+        if klass.detect(path, source):
+            LOG.debug('%r accepted %r (filename %r)', klass, name, path)
+            return klass
+        LOG.debug('%r rejected %r', klass, name)
+    raise ansible.errors.AnsibleError(NO_METHOD_MSG + repr(name))
+
+
+def invoke(invocation):
+    """
+    Find a Planner subclass corresponding to `invocation` and use it to invoke
+    the module.
+
+    :param Invocation invocation:
+    :returns:
+        Module return dict.
+    :raises ansible.errors.AnsibleError:
+        Unrecognized/unsupported module type.
+    """
+    path = ansible_mitogen.loaders.module_loader.find_plugin(
+        invocation.module_name,
+        '',
+    )
+    if path is None:
+        raise ansible.errors.AnsibleError(NO_MODULE_MSG % (
+            invocation.module_name,
+        ))
+
+    invocation.module_path = mitogen.core.to_text(path)
+    if invocation.module_path not in _planner_by_path:
+        _planner_by_path[invocation.module_path] = _get_planner(
+            invocation.module_name,
+            invocation.module_path,
+            invocation.get_module_source()
+        )
+
+    planner = _planner_by_path[invocation.module_path](invocation)
+    if invocation.wrap_async:
+        response = _invoke_async_task(invocation, planner)
+    elif planner.should_fork():
+        response = _invoke_isolated_task(invocation, planner)
+    else:
+        _propagate_deps(invocation, planner, invocation.connection.context)
+        response = invocation.connection.get_chain().call(
+            ansible_mitogen.target.run_module,
+            kwargs=planner.get_kwargs(),
+        )
+
+    return invocation.action._postprocess_response(response)
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/__init__.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/action/__init__.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/action/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_fetch.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_fetch.py
new file mode 100644
index 0000000000000000000000000000000000000000..1844efd8814c553c024a77e9aad5e43145987082
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_fetch.py
@@ -0,0 +1,162 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum, md5, secure_hash
+from ansible.utils.path import makedirs_safe
+
+
+REMOTE_CHECKSUM_ERRORS = {
+    '0': "unable to calculate the checksum of the remote file",
+    '1': "the remote file does not exist",
+    '2': "no read permission on remote file",
+    '3': "remote file is a directory, fetch cannot work on directories",
+    '4': "python isn't present on the system.  Unable to compute checksum",
+    '5': "stdlib json was not found on the remote machine. Only the raw module can work without those installed",
+}
+
+
+class ActionModule(ActionBase):
+
+    def run(self, tmp=None, task_vars=None):
+        ''' handler for fetch operations '''
+        if task_vars is None:
+            task_vars = dict()
+
+        result = super(ActionModule, self).run(tmp, task_vars)
+        try:
+            if self._play_context.check_mode:
+                result['skipped'] = True
+                result['msg'] = 'check mode not (yet) supported for this module'
+                return result
+
+            flat = boolean(self._task.args.get('flat'), strict=False)
+            fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False)
+            validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False)
+
+            # validate source and dest are strings FIXME: use basic.py and module specs
+            source = self._task.args.get('src')
+            if not isinstance(source, string_types):
+                result['msg'] = "Invalid type supplied for source option, it must be a string"
+
+            dest = self._task.args.get('dest')
+            if not isinstance(dest, string_types):
+                result['msg'] = "Invalid type supplied for dest option, it must be a string"
+
+            if result.get('msg'):
+                result['failed'] = True
+                return result
+
+            source = self._connection._shell.join_path(source)
+            source = self._remote_expand_user(source)
+
+            # calculate checksum for the remote file, don't bother if using
+            # become as slurp will be used Force remote_checksum to follow
+            # symlinks because fetch always follows symlinks
+            remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)
+
+            # calculate the destination name
+            if os.path.sep not in self._connection._shell.join_path('a', ''):
+                source = self._connection._shell._unquote(source)
+                source_local = source.replace('\\', '/')
+            else:
+                source_local = source
+
+            dest = os.path.expanduser(dest)
+            if flat:
+                if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep):
+                    result['msg'] = "dest is an existing directory, use a trailing slash if you want to fetch src into that directory"
+                    result['file'] = dest
+                    result['failed'] = True
+                    return result
+                if dest.endswith(os.sep):
+                    # if the path ends with "/", we'll use the source filename as the
+                    # destination filename
+                    base = os.path.basename(source_local)
+                    dest = os.path.join(dest, base)
+                if not dest.startswith("/"):
+                    # if dest does not start with "/", we'll assume a relative path
+                    dest = self._loader.path_dwim(dest)
+            else:
+                # files are saved in dest dir, with a subdir for each host, then the filename
+                if 'inventory_hostname' in task_vars:
+                    target_name = task_vars['inventory_hostname']
+                else:
+                    target_name = self._play_context.remote_addr
+                dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
+
+            dest = dest.replace("//", "/")
+
+            if remote_checksum in REMOTE_CHECKSUM_ERRORS:
+                result['changed'] = False
+                result['file'] = source
+                result['msg'] = REMOTE_CHECKSUM_ERRORS[remote_checksum]
+                # Historically, these don't fail because you may want to transfer
+                # a log file that possibly MAY exist but keep going to fetch other
+                # log files. Today, this is better achieved by adding
+                # ignore_errors or failed_when to the task.  Control the behaviour
+                # via fail_when_missing
+                if fail_on_missing:
+                    result['failed'] = True
+                    del result['changed']
+                else:
+                    result['msg'] += ", not transferring, ignored"
+                return result
+
+            # calculate checksum for the local file
+            local_checksum = checksum(dest)
+
+            if remote_checksum != local_checksum:
+                # create the containing directories, if needed
+                makedirs_safe(os.path.dirname(dest))
+
+                # fetch the file and check for changes
+                self._connection.fetch_file(source, dest)
+                new_checksum = secure_hash(dest)
+                # For backwards compatibility. We'll return None on FIPS enabled systems
+                try:
+                    new_md5 = md5(dest)
+                except ValueError:
+                    new_md5 = None
+
+                if validate_checksum and new_checksum != remote_checksum:
+                    result.update(dict(failed=True, md5sum=new_md5,
+                                       msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
+                                       checksum=new_checksum, remote_checksum=remote_checksum))
+                else:
+                    result.update({'changed': True, 'md5sum': new_md5, 'dest': dest,
+                                   'remote_md5sum': None, 'checksum': new_checksum,
+                                   'remote_checksum': remote_checksum})
+            else:
+                # For backwards compatibility. We'll return None on FIPS enabled systems
+                try:
+                    local_md5 = md5(dest)
+                except ValueError:
+                    local_md5 = None
+                result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
+
+        finally:
+            self._remove_tmp_path(self._connection._shell.tmpdir)
+
+        return result
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_get_stack.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_get_stack.py
new file mode 100644
index 0000000000000000000000000000000000000000..171f84ea7cf5e9877bbbca3a2f8cef2a7c66f5f0
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_get_stack.py
@@ -0,0 +1,55 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+"""
+Fetch the connection configuration stack that would be used to connect to a
+target, without actually connecting to it.
+"""
+
+import ansible_mitogen.connection
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+    def run(self, tmp=None, task_vars=None):
+        if not isinstance(self._connection,
+                          ansible_mitogen.connection.Connection):
+            return {
+                'skipped': True,
+            }
+
+        _, stack = self._connection._build_stack()
+        return {
+            'changed': True,
+            'result': stack,
+            '_ansible_verbose_always': True,
+        }
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/__init__.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_buildah.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_buildah.py
new file mode 100644
index 0000000000000000000000000000000000000000..017214b2469c497cf8a1e99016af1dbde41917eb
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_buildah.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'buildah'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_doas.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_doas.py
new file mode 100644
index 0000000000000000000000000000000000000000..1113d7c63e766a3522ef5f9f8de4d4803f5654b6
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_doas.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'mitogen_doas'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_docker.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_docker.py
new file mode 100644
index 0000000000000000000000000000000000000000..b71ef5f11ab48c1b0679af2e4ae42363282a481b
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_docker.py
@@ -0,0 +1,51 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'docker'
+
+    @property
+    def docker_cmd(self):
+        """
+        Ansible 2.3 synchronize module wants to know how we run Docker.
+        """
+        return 'docker'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_jail.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_jail.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7475fb1431c528bd071013f8b53c1624ba9c6b3
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_jail.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'jail'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py
new file mode 100644
index 0000000000000000000000000000000000000000..44d3b50a2057b59ecb22f9bed7ac7f474933dc07
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py
@@ -0,0 +1,79 @@
+# coding: utf-8
+# Copyright 2018, Yannig Perré
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils.six import iteritems
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+import ansible_mitogen.loaders
+
+
+_class = ansible_mitogen.loaders.connection_loader__get(
+    'kubectl',
+    class_only=True,
+)
+
+if _class:
+    kubectl = sys.modules[_class.__module__]
+    del _class
+else:
+    kubectl = None
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'kubectl'
+
+    not_supported_msg = (
+        'The "mitogen_kubectl" plug-in requires a version of Ansible '
+        'that ships with the "kubectl" connection plug-in.'
+    )
+
+    def __init__(self, *args, **kwargs):
+        if kubectl is None:
+            raise AnsibleConnectionFailure(self.not_supported_msg)
+        super(Connection, self).__init__(*args, **kwargs)
+
+    def get_extra_args(self):
+        parameters = []
+        for key, option in iteritems(kubectl.CONNECTION_OPTIONS):
+            if self.get_task_var('ansible_' + key) is not None:
+                parameters += [ option, self.get_task_var('ansible_' + key) ]
+
+        return parameters
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_local.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_local.py
new file mode 100644
index 0000000000000000000000000000000000000000..a98c834c59e0cb7e3fc4709e36aa9263b6351237
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_local.py
@@ -0,0 +1,86 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+import ansible_mitogen.process
+
+
+if sys.version_info > (3,):
+    viewkeys = dict.keys
+elif sys.version_info > (2, 7):
+    viewkeys = dict.viewkeys
+else:
+    viewkeys = lambda dct: set(dct)
+
+
+def dict_diff(old, new):
+    """
+    Return a dict representing the differences between the dicts `old` and
+    `new`. Deleted keys appear as a key with the value :data:`None`, added and
+    changed keys appear as a key with the new value.
+    """
+    old_keys = viewkeys(old)
+    new_keys = viewkeys(dict(new))
+    out = {}
+    for key in new_keys - old_keys:
+        out[key] = new[key]
+    for key in old_keys - new_keys:
+        out[key] = None
+    for key in old_keys & new_keys:
+        if old[key] != new[key]:
+            out[key] = new[key]
+    return out
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'local'
+
+    def get_default_cwd(self):
+        # https://github.com/ansible/ansible/issues/14489
+        return self.loader_basedir
+
+    def get_default_env(self):
+        """
+        Vanilla Ansible local commands execute with an environment inherited
+        from WorkerProcess, we must emulate that.
+        """
+        return dict_diff(
+            old=ansible_mitogen.process.MuxProcess.cls_original_env,
+            new=os.environ,
+        )
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxc.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxc.py
new file mode 100644
index 0000000000000000000000000000000000000000..696c9abd0a580ddd5f798c15e074747b7d28ea78
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxc.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'lxc'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxd.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxd.py
new file mode 100644
index 0000000000000000000000000000000000000000..95e692a013b6501f5a0733967d561cf5214f363a
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxd.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'lxd'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_machinectl.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_machinectl.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f5a0d2827452f9bdeffcea836cafb1884b4e5ab
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_machinectl.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'machinectl'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_setns.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_setns.py
new file mode 100644
index 0000000000000000000000000000000000000000..20c6f1370c22856ba7eb46d8437a65aca0c36f12
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_setns.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'setns'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_ssh.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_ssh.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c81dae52eacc39195d5dc51e400d786a8a3590b
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_ssh.py
@@ -0,0 +1,67 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+DOCUMENTATION = """
+    author: David Wilson <dw@botanicus.net>
+    connection: mitogen_ssh
+    short_description: Connect over SSH via Mitogen
+    description:
+        - This connects using an OpenSSH client controlled by the Mitogen for
+          Ansible extension. It accepts every option the vanilla ssh plugin
+          accepts.
+    version_added: "2.5"
+    options:
+"""
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+import ansible_mitogen.loaders
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'ssh'
+    vanilla_class = ansible_mitogen.loaders.connection_loader__get(
+        'ssh',
+        class_only=True,
+    )
+
+    @staticmethod
+    def _create_control_path(*args, **kwargs):
+        """Forward _create_control_path() to the implementation in ssh.py."""
+        # https://github.com/dw/mitogen/issues/342
+        return Connection.vanilla_class._create_control_path(*args, **kwargs)
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_su.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_su.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ab2711e5c37546f1f316f6527fd39c941f47dd8
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_su.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'mitogen_su'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_sudo.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_sudo.py
new file mode 100644
index 0000000000000000000000000000000000000000..130f544541166c238e81fcffde5d52d3bc92b8c1
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_sudo.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'mitogen_sudo'
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/__init__.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen.py
new file mode 100644
index 0000000000000000000000000000000000000000..66872663ff4e029c0b0ad98a92810648a04a8812
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen.py
@@ -0,0 +1,61 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module, it simply exists as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../..')
+)
+
+if BASE_DIR not in sys.path:
+    sys.path.insert(0, BASE_DIR)
+
+import ansible_mitogen.strategy
+import ansible.plugins.strategy.linear
+
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin,
+                     ansible.plugins.strategy.linear.StrategyModule):
+    pass
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_free.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_free.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffe2fbd9465aef88dab2d975955401792021ea32
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_free.py
@@ -0,0 +1,62 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module, it simply exists as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../..')
+)
+
+if BASE_DIR not in sys.path:
+    sys.path.insert(0, BASE_DIR)
+
+import ansible_mitogen.loaders
+import ansible_mitogen.strategy
+
+
+Base = ansible_mitogen.loaders.strategy_loader.get('free', class_only=True)
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base):
+    pass
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
new file mode 100644
index 0000000000000000000000000000000000000000..23eccd3699fc925c476ba2794a5999bcc691041f
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
@@ -0,0 +1,67 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module, it simply exists as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../..')
+)
+
+if BASE_DIR not in sys.path:
+    sys.path.insert(0, BASE_DIR)
+
+import ansible_mitogen.loaders
+import ansible_mitogen.strategy
+
+
+Base = ansible_mitogen.loaders.strategy_loader.get('host_pinned', class_only=True)
+
+if Base is None:
+    raise ImportError(
+        'The host_pinned strategy is only available in Ansible 2.7 or newer.'
+    )
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base):
+    pass
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_linear.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_linear.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b198e61d4f2822d3451b2aed6c5c4d1d563fff7
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_linear.py
@@ -0,0 +1,62 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module, it simply exists as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../..')
+)
+
+if BASE_DIR not in sys.path:
+    sys.path.insert(0, BASE_DIR)
+
+import ansible_mitogen.loaders
+import ansible_mitogen.strategy
+
+
+Base = ansible_mitogen.loaders.strategy_loader.get('linear', class_only=True)
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base):
+    pass
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/process.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/process.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fc7bf801a76d488226b638c0366d7577d7d4421
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/process.py
@@ -0,0 +1,745 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import atexit
+import logging
+import multiprocessing
+import os
+import resource
+import socket
+import signal
+import sys
+
+try:
+    import faulthandler
+except ImportError:
+    faulthandler = None
+
+try:
+    import setproctitle
+except ImportError:
+    setproctitle = None
+
+import mitogen
+import mitogen.core
+import mitogen.debug
+import mitogen.fork
+import mitogen.master
+import mitogen.parent
+import mitogen.service
+import mitogen.unix
+import mitogen.utils
+
+import ansible
+import ansible.constants as C
+import ansible.errors
+import ansible_mitogen.logging
+import ansible_mitogen.services
+
+from mitogen.core import b
+import ansible_mitogen.affinity
+
+
+LOG = logging.getLogger(__name__)
+
+ANSIBLE_PKG_OVERRIDE = (
+    u"__version__ = %r\n"
+    u"__author__ = %r\n"
+)
+
+MAX_MESSAGE_SIZE = 4096 * 1048576
+
+worker_model_msg = (
+    'Mitogen connection types may only be instantiated when one of the '
+    '"mitogen_*" or "operon_*" strategies are active.'
+)
+
+shutting_down_msg = (
+    'The task worker cannot connect. Ansible may be shutting down, or '
+    'the maximum open files limit may have been exceeded. If this occurs '
+    'midway through a run, please retry after increasing the open file '
+    'limit (ulimit -n). Original error: %s'
+)
+
+
+#: The worker model as configured by the currently running strategy. This is
+#: managed via :func:`get_worker_model` / :func:`set_worker_model` functions by
+#: :class:`StrategyMixin`.
+_worker_model = None
+
+
+#: A copy of the sole :class:`ClassicWorkerModel` that ever exists during a
+#: classic run, as return by :func:`get_classic_worker_model`.
+_classic_worker_model = None
+
+
+def set_worker_model(model):
+    """
+    To remove process model-wiring from
+    :class:`ansible_mitogen.connection.Connection`, it is necessary to track
+    some idea of the configured execution environment outside the connection
+    plug-in.
+
+    That is what :func:`set_worker_model` and :func:`get_worker_model` are for.
+    """
+    global _worker_model
+    assert model is None or _worker_model is None
+    _worker_model = model
+
+
+def get_worker_model():
+    """
+    Return the :class:`WorkerModel` currently configured by the running
+    strategy.
+    """
+    if _worker_model is None:
+        raise ansible.errors.AnsibleConnectionFailure(worker_model_msg)
+    return _worker_model
+
+
+def get_classic_worker_model(**kwargs):
+    """
+    Return the single :class:`ClassicWorkerModel` instance, constructing it if
+    necessary.
+    """
+    global _classic_worker_model
+    assert _classic_worker_model is None or (not kwargs), \
+        "ClassicWorkerModel kwargs supplied but model already constructed"
+
+    if _classic_worker_model is None:
+        _classic_worker_model = ClassicWorkerModel(**kwargs)
+    return _classic_worker_model
+
+
+def getenv_int(key, default=0):
+    """
+    Get an integer-valued environment variable `key`, if it exists and parses
+    as an integer, otherwise return `default`.
+    """
+    try:
+        return int(os.environ.get(key, str(default)))
+    except ValueError:
+        return default
+
+
+def save_pid(name):
+    """
+    When debugging and profiling, it is very annoying to poke through the
+    process list to discover the currently running Ansible and MuxProcess IDs,
+    especially when trying to catch an issue during early startup. So here, if
+    a magic environment variable set, stash them in hidden files in the CWD::
+
+        alias muxpid="cat .ansible-mux.pid"
+        alias anspid="cat .ansible-controller.pid"
+
+        gdb -p $(muxpid)
+        perf top -p $(anspid)
+    """
+    if os.environ.get('MITOGEN_SAVE_PIDS'):
+        with open('.ansible-%s.pid' % (name,), 'w') as fp:
+            fp.write(str(os.getpid()))
+
+
+def setup_pool(pool):
+    """
+    Configure a connection multiplexer's :class:`mitogen.service.Pool` with
+    services accessed by clients and WorkerProcesses.
+    """
+    pool.add(mitogen.service.FileService(router=pool.router))
+    pool.add(mitogen.service.PushFileService(router=pool.router))
+    pool.add(ansible_mitogen.services.ContextService(router=pool.router))
+    pool.add(ansible_mitogen.services.ModuleDepService(pool.router))
+    LOG.debug('Service pool configured: size=%d', pool.size)
+
+
def _setup_simplejson(responder):
    """
    We support serving simplejson for Python 2.4 targets on Ansible 2.3, at
    least so the package's own CI Docker scripts can run without external
    help, however newer versions of simplejson no longer support Python
    2.4. Therefore override any installed/loaded version with a
    2.4-compatible version we ship in the compat/ directory.

    :param mitogen.master.ModuleResponder responder:
        Responder that serves module source to child contexts.
    """
    responder.whitelist_prefix('simplejson')

    # issue #536: must be at end of sys.path, in case existing newer
    # version is already loaded.
    compat_path = os.path.join(os.path.dirname(__file__), 'compat')
    sys.path.append(compat_path)

    for fullname, is_pkg, suffix in (
        (u'simplejson', True, '__init__.py'),
        (u'simplejson.decoder', False, 'decoder.py'),
        (u'simplejson.encoder', False, 'encoder.py'),
        (u'simplejson.scanner', False, 'scanner.py'),
    ):
        path = os.path.join(compat_path, 'simplejson', suffix)
        # NOTE(review): try/finally rather than "with" — presumably kept for
        # compatibility with very old Python; do not modernize casually.
        fp = open(path, 'rb')
        try:
            source = fp.read()
        finally:
            fp.close()

        # Registering an override makes the responder serve the bundled copy
        # in place of any simplejson found on the controller's sys.path.
        responder.add_source_override(
            fullname=fullname,
            path=path,
            source=source,
            is_pkg=is_pkg,
        )
+
+
def _setup_responder(responder):
    """
    Configure :class:`mitogen.master.ModuleResponder` to only permit
    certain packages, and to generate custom responses for certain modules.

    :param mitogen.master.ModuleResponder responder:
        Responder that serves module source to child contexts.
    """
    responder.whitelist_prefix('ansible')
    responder.whitelist_prefix('ansible_mitogen')
    _setup_simplejson(responder)

    # Ansible 2.3 is compatible with Python 2.4 targets, however
    # ansible/__init__.py is not. Instead, executor/module_common.py writes
    # out a 2.4-compatible namespace package for unknown reasons. So we
    # copy it here.
    # ANSIBLE_PKG_OVERRIDE is a module-level template interpolated with the
    # controller's ansible version and author strings.
    responder.add_source_override(
        fullname='ansible',
        path=ansible.__file__,
        source=(ANSIBLE_PKG_OVERRIDE % (
            ansible.__version__,
            ansible.__author__,
        )).encode(),
        is_pkg=True,
    )
+
+
def increase_open_file_limit():
    """
    #549: in order to reduce the possibility of hitting an open files limit,
    increase :data:`resource.RLIMIT_NOFILE` from its soft limit to its hard
    limit, if they differ.

    It is common that a low soft limit is configured by default, where the hard
    limit is much higher.
    """
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if hard == resource.RLIM_INFINITY:
        hard_s = '(infinity)'
        # cap in case of O(RLIMIT_NOFILE) algorithm in some subprocess.
        hard = 524288
    else:
        hard_s = str(hard)

    LOG.debug('inherited open file limits: soft=%d hard=%s', soft, hard_s)
    if soft >= hard:
        # Nothing to raise; soft limit already at (or above) the hard limit.
        LOG.debug('max open files already set to hard limit: %d', hard)
        return

    # OS X is limited by kern.maxfilesperproc sysctl, rather than the
    # advertised unlimited hard RLIMIT_NOFILE. Just hard-wire known defaults
    # for that sysctl, to avoid the mess of querying it.
    for value in (hard, 10240):
        try:
            resource.setrlimit(resource.RLIMIT_NOFILE, (value, hard))
            LOG.debug('raised soft open file limit from %d to %d', soft, value)
            break
        except ValueError as e:
            # setrlimit() signals refusal with ValueError; log and fall
            # through to the next (smaller) candidate value.
            LOG.debug('could not raise soft open file limit from %d to %d: %s',
                      soft, value, e)
+
+
def common_setup(enable_affinity=True, _init_logging=True):
    """
    Initialization shared by top-level process entry points: record the
    controller PID, set the process name, optionally configure logging and CPU
    affinity, tune the GIL, enable faulthandler and profiling when requested,
    snapshot :data:`os.environ`, and raise the open file limit.

    :param bool enable_affinity:
        If :data:`True`, pin the controller per the affinity policy.
    :param bool _init_logging:
        For testing, if :data:`False`, don't initialize logging.
    """
    save_pid('controller')
    ansible_mitogen.logging.set_process_name('top')

    if _init_logging:
        ansible_mitogen.logging.setup()

    if enable_affinity:
        ansible_mitogen.affinity.policy.assign_controller()

    mitogen.utils.setup_gil()
    # faulthandler is None when unavailable; its import is presumably guarded
    # at module top (not visible here) — TODO confirm.
    if faulthandler is not None:
        faulthandler.enable()

    # Any positive MITOGEN_PROFILING value enables profiling.
    MuxProcess.profiling = getenv_int('MITOGEN_PROFILING') > 0
    if MuxProcess.profiling:
        mitogen.core.enable_profiling()

    # Snapshot the environment so mitogen_local.py can detect changes made
    # later by e.g. vars plugins (issue #297).
    MuxProcess.cls_original_env = dict(os.environ)
    increase_open_file_limit()
+
+
def get_cpu_count(default=None):
    """
    Return the multiplexer CPU count taken from the MITOGEN_CPU_COUNT
    environment variable, falling back to `default` when the variable is
    unset or outside the range 1..cpu_count().

    :param int default:
        Default CPU, or :data:`None` to use all available CPUs.
    """
    available = multiprocessing.cpu_count()
    if default is None:
        default = available

    requested = getenv_int('MITOGEN_CPU_COUNT', default=default)
    # Reject nonsense values (zero, negative, or more CPUs than exist).
    if not (1 <= requested <= available):
        requested = default

    return requested
+
+
class Broker(mitogen.master.Broker):
    """
    WorkerProcess maintains at most 2 file descriptors, therefore does not need
    the exuberant syscall expense of EpollPoller, so override it and restore
    the poll() poller.
    """
    # Plain poll()-based poller; cheaper than epoll for tiny FD counts.
    poller_class = mitogen.core.Poller
+
+
class Binding(object):
    """
    Represent a bound connection for a particular inventory hostname. When
    operating in sharded mode, the actual MuxProcess implementing a connection
    varies according to the target machine. Depending on the particular
    implementation, this class represents a binding to the correct MuxProcess.

    This is an abstract interface; :class:`ClassicBinding` is the concrete
    implementation visible in this file.
    """
    def get_child_service_context(self):
        """
        Return the :class:`mitogen.core.Context` to which children should
        direct requests for services such as FileService, or :data:`None` for
        the local process.

        This can be different from :meth:`get_service_context` where MuxProcess
        and WorkerProcess are combined, and it is discovered a task is
        delegated after being assigned to its initial worker for the original
        un-delegated hostname. In that case, connection management and
        expensive services like file transfer must be implemented by the
        MuxProcess connected to the target, rather than routed to the
        MuxProcess responsible for executing the task.
        """
        raise NotImplementedError()

    def get_service_context(self):
        """
        Return the :class:`mitogen.core.Context` to which this process should
        direct ContextService requests, or :data:`None` for the local process.
        """
        raise NotImplementedError()

    def close(self):
        """
        Finalize any associated resources.
        """
        raise NotImplementedError()
+
+
class WorkerModel(object):
    """
    Interface used by StrategyMixin to manage various Mitogen services, by
    default running in one or more connection multiplexer subprocesses spawned
    off the top-level Ansible process.

    This is an abstract interface; :class:`ClassicWorkerModel` is the concrete
    implementation visible in this file.
    """
    def on_strategy_start(self):
        """
        Called prior to strategy start in the top-level process. Responsible
        for preparing any worker/connection multiplexer state.
        """
        raise NotImplementedError()

    def on_strategy_complete(self):
        """
        Called after strategy completion in the top-level process. Must place
        Ansible back in a "compatible" state where any other strategy plug-in
        may execute.
        """
        raise NotImplementedError()

    def get_binding(self, inventory_name):
        """
        Return a :class:`Binding` to access Mitogen services for
        `inventory_name`. Usually called from worker processes, but may also be
        called from top-level process to handle "meta: reset_connection".
        """
        raise NotImplementedError()
+
+
class ClassicBinding(Binding):
    """
    Only one connection may be active at a time in a classic worker, so its
    binding just provides forwarders back to :class:`ClassicWorkerModel`.
    """
    def __init__(self, model):
        """
        :param ClassicWorkerModel model:
            Model that issued this binding and owns the parent context.
        """
        self.model = model

    def get_service_context(self):
        """
        See Binding.get_service_context().
        """
        return self.model.parent

    def get_child_service_context(self):
        """
        See Binding.get_child_service_context().
        """
        return self.model.parent

    def close(self):
        """
        See Binding.close().
        """
        self.model.on_binding_close()
+
+
class ClassicWorkerModel(WorkerModel):
    """
    Classic (pre-sharded-aware) implementation of :class:`WorkerModel`: one or
    more forked MuxProcess children hold the Mitogen broker and services, and
    workers connect to them over per-mux UNIX sockets.
    """
    #: In the top-level process, this references one end of a socketpair(),
    #: whose other end child MuxProcesses block reading from to determine when
    #: the master process dies. When the top-level exits abnormally, or
    #: normally but where :func:`_on_process_exit` has been called, this socket
    #: will be closed, causing all the children to wake.
    parent_sock = None

    #: In the mux process, this is the other end of :attr:`parent_sock`.
    #: The main thread blocks on a read from it until :attr:`parent_sock`
    #: is closed.
    child_sock = None

    #: mitogen.master.Router for this worker.
    router = None

    #: mitogen.master.Broker for this worker.
    broker = None

    #: Name of multiplexer process socket we are currently connected to.
    listener_path = None

    #: mitogen.parent.Context representing the parent Context, which is the
    #: connection multiplexer process when running in classic mode, or the
    #: top-level process when running a new-style mode.
    parent = None

    def __init__(self, _init_logging=True):
        """
        Arrange for classic model multiplexers to be started. The parent
        chooses UNIX socket paths each child will use prior to fork, creates a
        socketpair used essentially as a semaphore, then blocks waiting for the
        child to indicate the UNIX socket is ready for use.

        :param bool _init_logging:
            For testing, if :data:`False`, don't initialize logging.
        """
        # #573: The process ID that installed the :mod:`atexit` handler. If
        # some unknown Ansible plug-in forks the Ansible top-level process and
        # later performs a graceful Python exit, it may try to wait for child
        # PIDs it never owned, causing a crash. We want to avoid that.
        self._pid = os.getpid()

        common_setup(_init_logging=_init_logging)

        self.parent_sock, self.child_sock = socket.socketpair()
        # Prevent either end of the liveness socketpair leaking into exec()'d
        # children.
        mitogen.core.set_cloexec(self.parent_sock.fileno())
        mitogen.core.set_cloexec(self.child_sock.fileno())

        # One MuxProcess per configured CPU; defaults to a single mux unless
        # MITOGEN_CPU_COUNT overrides (see get_cpu_count()).
        self._muxes = [
            MuxProcess(self, index)
            for index in range(get_cpu_count(default=1))
        ]
        for mux in self._muxes:
            mux.start()

        atexit.register(self._on_process_exit)
        # The child end now belongs to the forked MuxProcesses.
        self.child_sock.close()
        self.child_sock = None

    def _listener_for_name(self, name):
        """
        Given an inventory hostname, return the UNIX listener that should
        communicate with it. This is a simple hash of the inventory name.
        """
        mux = self._muxes[abs(hash(name)) % len(self._muxes)]
        LOG.debug('will use multiplexer %d (%s) to connect to "%s"',
                  mux.index, mux.path, name)
        return mux.path

    def _reconnect(self, path):
        """
        Tear down any existing parent connection and connect to the UNIX
        listener at `path`, updating :attr:`router`, :attr:`parent` and
        :attr:`listener_path`.

        :raises ansible.errors.AnsibleError:
            The multiplexer could not be reached (e.g. during shutdown).
        """
        if self.router is not None:
            # Router can just be overwritten, but the previous parent
            # connection must explicitly be removed from the broker first.
            self.router.disconnect(self.parent)
            self.parent = None
            self.router = None

        try:
            self.router, self.parent = mitogen.unix.connect(
                path=path,
                broker=self.broker,
            )
        except mitogen.unix.ConnectError as e:
            # This is not AnsibleConnectionFailure since we want to break
            # with_items loops.
            raise ansible.errors.AnsibleError(shutting_down_msg % (e,))

        self.router.max_message_size = MAX_MESSAGE_SIZE
        self.listener_path = path

    def _on_process_exit(self):
        """
        This is an :mod:`atexit` handler installed in the top-level process.

        Shut the write end of `sock`, causing the receive side of the socket in
        every :class:`MuxProcess` to return 0-byte reads, and causing their
        main threads to wake and initiate shutdown. After shutting the socket
        down, wait on each child to finish exiting.

        This is done using :mod:`atexit` since Ansible lacks any better hook to
        run code during exit, and unless some synchronization exists with
        MuxProcess, debug logs may appear on the user's terminal *after* the
        prompt has been printed.
        """
        # #573: ignore the handler when invoked in a forked copy of the
        # top-level process; only the installing PID may reap the muxes.
        if self._pid != os.getpid():
            return

        try:
            self.parent_sock.shutdown(socket.SHUT_WR)
        except socket.error:
            # Already closed. This is possible when tests are running.
            LOG.debug('_on_process_exit: ignoring duplicate call')
            return

        mitogen.core.io_op(self.parent_sock.recv, 1)
        self.parent_sock.close()

        for mux in self._muxes:
            _, status = os.waitpid(mux.pid, 0)
            status = mitogen.fork._convert_exit_status(status)
            LOG.debug('multiplexer %d PID %d %s', mux.index, mux.pid,
                      mitogen.parent.returncode_to_str(status))

    def _test_reset(self):
        """
        Used to clean up in unit tests.
        """
        self.on_binding_close()
        self._on_process_exit()
        set_worker_model(None)

        global _classic_worker_model
        _classic_worker_model = None

    def on_strategy_start(self):
        """
        See WorkerModel.on_strategy_start().
        """

    def on_strategy_complete(self):
        """
        See WorkerModel.on_strategy_complete().
        """

    def get_binding(self, inventory_name):
        """
        See WorkerModel.get_binding().
        """
        if self.broker is None:
            self.broker = Broker()

        # Reconnect only when the hash of the inventory name selects a
        # different multiplexer than the current connection.
        path = self._listener_for_name(inventory_name)
        if path != self.listener_path:
            self._reconnect(path)

        return ClassicBinding(self)

    def on_binding_close(self):
        """
        Shut down the worker's broker and forget all cached connection state.
        Invoked via :meth:`ClassicBinding.close`.
        """
        if not self.broker:
            return

        self.broker.shutdown()
        self.broker.join()
        self.router = None
        self.broker = None
        self.parent = None
        self.listener_path = None

        # #420: Ansible executes "meta" actions in the top-level process,
        # meaning "reset_connection" will cause :class:`mitogen.core.Latch` FDs
        # to be cached and erroneously shared by children on subsequent
        # WorkerProcess forks. To handle that, call on_fork() to ensure any
        # shared state is discarded.
        # #490: only attempt to clean up when it's known that some resources
        # exist to cleanup, otherwise later __del__ double-call to close() due
        # to GC at random moment may obliterate an unrelated Connection's
        # related resources.
        mitogen.fork.on_fork()
+
+
class MuxProcess(object):
    """
    Implement a subprocess forked from the Ansible top-level, as a safe place
    to contain the Mitogen IO multiplexer thread, keeping its use of the
    logging package (and the logging package's heavy use of locks) far away
    from os.fork(), which is used continuously by the multiprocessing package
    in the top-level process.

    The problem with running the multiplexer in that process is that should the
    multiplexer thread be in the process of emitting a log entry (and holding
    its lock) at the point of fork, in the child, the first attempt to log any
    log entry using the same handler will deadlock the child, as in the memory
    image the child received, the lock will always be marked held.

    See https://bugs.python.org/issue6721 for a thorough description of the
    class of problems this worker is intended to avoid.
    """
    #: A copy of :data:`os.environ` at the time the multiplexer process was
    #: started. It's used by mitogen_local.py to find changes made to the
    #: top-level environment (e.g. vars plugins -- issue #297) that must be
    #: applied to locally executed commands and modules.
    cls_original_env = None

    def __init__(self, model, index):
        """
        :param ClassicWorkerModel model:
            Owning model; supplies the liveness socketpair.
        :param int index:
            CPU index of this multiplexer.
        """
        #: :class:`ClassicWorkerModel` instance we were created by.
        self.model = model
        #: MuxProcess CPU index.
        self.index = index
        #: Individual path of this process.
        self.path = mitogen.unix.make_socket_path()

    def start(self):
        """
        Fork the multiplexer child. In the parent, block until the child
        signals its UNIX listener is ready, then return. In the child, set the
        process title, drop the parent's socket end, run :meth:`worker_main`,
        and exit without returning.
        """
        self.pid = os.fork()
        if self.pid:
            # Wait for child to boot before continuing.
            mitogen.core.io_op(self.model.parent_sock.recv, 1)
            return

        ansible_mitogen.logging.set_process_name('mux:' + str(self.index))
        # setproctitle is optional; when available, label the process so it is
        # identifiable in ps(1) output.
        if setproctitle:
            setproctitle.setproctitle('mitogen mux:%s (%s)' % (
                self.index,
                os.path.basename(self.path),
            ))

        # The parent's end of the liveness socketpair must not survive in the
        # child.
        self.model.parent_sock.close()
        self.model.parent_sock = None
        try:
            try:
                self.worker_main()
            except Exception:
                LOG.exception('worker_main() crashed')
        finally:
            sys.exit()

    def worker_main(self):
        """
        The main function of the mux process: setup the Mitogen broker thread
        and ansible_mitogen services, then sleep waiting for the socket
        connected to the parent to be closed (indicating the parent has died).
        """
        save_pid('mux')

        # #623: MuxProcess ignores SIGINT because it wants to live until every
        # Ansible worker process has been cleaned up by
        # TaskQueueManager.cleanup(), otherwise harmless yet scary warnings
        # about being unable to connect to MuxProcess could be printed.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        ansible_mitogen.logging.set_process_name('mux')
        ansible_mitogen.affinity.policy.assign_muxprocess(self.index)

        self._setup_master()
        self._setup_services()

        try:
            # Let the parent know our listening socket is ready.
            mitogen.core.io_op(self.model.child_sock.send, b('1'))
            # Block until the socket is closed, which happens on parent exit.
            mitogen.core.io_op(self.model.child_sock.recv, 1)
        finally:
            self.broker.shutdown()
            self.broker.join()

            # Test frameworks living somewhere higher on the stack of the
            # original parent process may try to catch sys.exit(), so do a C
            # level exit instead.
            os._exit(0)

    def _enable_router_debug(self):
        """
        Enable router debug logging when MITOGEN_ROUTER_DEBUG is set.
        """
        if 'MITOGEN_ROUTER_DEBUG' in os.environ:
            self.router.enable_debug()

    def _enable_stack_dumps(self):
        """
        Periodically dump thread stacks to the log when
        MITOGEN_DUMP_THREAD_STACKS is set to a positive interval in seconds.
        """
        secs = getenv_int('MITOGEN_DUMP_THREAD_STACKS', default=0)
        if secs:
            mitogen.debug.dump_to_logger(secs=secs)

    def _setup_master(self):
        """
        Construct a Router, Broker, and mitogen.unix listener
        """
        self.broker = mitogen.master.Broker(install_watcher=False)
        self.router = mitogen.master.Router(
            broker=self.broker,
            max_message_size=MAX_MESSAGE_SIZE,
        )
        _setup_responder(self.router.responder)
        mitogen.core.listen(self.broker, 'shutdown', self._on_broker_shutdown)
        mitogen.core.listen(self.broker, 'exit', self._on_broker_exit)
        self.listener = mitogen.unix.Listener.build_stream(
            router=self.router,
            path=self.path,
            backlog=C.DEFAULT_FORKS,
        )
        self._enable_router_debug()
        self._enable_stack_dumps()

    def _setup_services(self):
        """
        Construct a ContextService and a thread to service requests for it
        arriving from worker processes.
        """
        self.pool = mitogen.service.Pool(
            router=self.router,
            size=getenv_int('MITOGEN_POOL_SIZE', default=32),
        )
        setup_pool(self.pool)

    def _on_broker_shutdown(self):
        """
        Respond to broker shutdown by shutting down the pool. Do not join on it
        yet, since that would block the broker thread which then cannot clean
        up pending handlers and connections, which is required for the threads
        to exit gracefully.
        """
        self.pool.stop(join=False)

    def _on_broker_exit(self):
        """
        Respond to the broker thread about to exit by finally joining on the
        pool. This is safe since pools only block in connection attempts, and
        connection attempts fail with CancelledError when broker shutdown
        begins.
        """
        self.pool.join()
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/runner.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..064023442294607ed2db537e89edbb2308d0bdbd
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/runner.py
@@ -0,0 +1,1020 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+These classes implement execution for each style of Ansible module. They are
+instantiated in the target context by way of target.py::run_module().
+
+Each class in here has a corresponding Planner class in planners.py that knows
+how to build arguments for it, preseed related data, etc.
+"""
+
+import atexit
+import imp
+import os
+import re
+import shlex
+import shutil
+import sys
+import tempfile
+import traceback
+import types
+
+import mitogen.core
+import ansible_mitogen.target  # TODO: circular import
+from mitogen.core import b
+from mitogen.core import bytes_partition
+from mitogen.core import str_rpartition
+from mitogen.core import to_text
+
+try:
+    import ctypes
+except ImportError:
+    # Python 2.4
+    ctypes = None
+
+try:
+    import json
+except ImportError:
+    # Python 2.4
+    import simplejson as json
+
+try:
+    # Cannot use cStringIO as it does not support Unicode.
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    from pipes import quote as shlex_quote
+
+# Absolute imports for <2.5.
+logging = __import__('logging')
+
+
+# Prevent accidental import of an Ansible module from hanging on stdin read.
+import ansible.module_utils.basic
+ansible.module_utils.basic._ANSIBLE_ARGS = '{}'
+
+# For tasks that modify /etc/resolv.conf, non-Debian derivative glibcs cache
+# resolv.conf at startup and never implicitly reload it. Cope with that via an
+# explicit call to res_init() on each task invocation. BSD-alikes export it
+# directly, Linux #defines it as "__res_init".
+libc__res_init = None
+if ctypes:
+    libc = ctypes.CDLL(None)
+    for symbol in 'res_init', '__res_init':
+        try:
+            libc__res_init = getattr(libc, symbol)
+        except AttributeError:
+            pass
+
+iteritems = getattr(dict, 'iteritems', dict.items)
+LOG = logging.getLogger(__name__)
+
+
def shlex_split_b(s):
    """
    Use shlex.split() to split characters in some single-byte encoding, without
    knowing what that encoding is. The input is bytes, the output is a list of
    bytes.
    """
    assert isinstance(s, mitogen.core.BytesType)
    if not mitogen.core.PY3:
        return [tok for tok in shlex.split(s, comments=True)]

    # Python 3: shlex operates on text, so round-trip through latin1, which
    # maps every byte 0..255 to a distinct code point losslessly.
    decoded = s.decode('latin1')
    return [tok.encode('latin1') for tok in shlex.split(decoded, comments=True)]
+
+
class TempFileWatcher(object):
    """
    Since Ansible 2.7.0, lineinfile leaks file descriptors returned by
    :func:`tempfile.mkstemp` (ansible/ansible#57327). Handle this and all
    similar cases by recording descriptors produced by mkstemp during module
    execution, and cleaning up any leaked descriptors on completion.
    """
    def __init__(self):
        # Keep the real function and monkey-patch the module, so every
        # mkstemp() call made during the task flows through _wrap_mkstemp().
        self._real_mkstemp = tempfile.mkstemp
        # One (fd, st_dev, st_ino) triple per descriptor handed out.
        self._fd_dev_inode = []
        tempfile.mkstemp = self._wrap_mkstemp

    def _wrap_mkstemp(self, *args, **kwargs):
        """
        Forward to the real mkstemp(), recording the descriptor together with
        the identity (device, inode) of the file it referred to at creation.
        """
        fd, path = self._real_mkstemp(*args, **kwargs)
        stat = os.fstat(fd)
        self._fd_dev_inode.append((fd, stat.st_dev, stat.st_ino))
        return fd, path

    def revert(self):
        """
        Restore the original tempfile.mkstemp() and close any recorded
        descriptor that is still open and still refers to its original file.
        """
        tempfile.mkstemp = self._real_mkstemp
        for fd, st_dev, st_ino in self._fd_dev_inode:
            self._revert_one(fd, st_dev, st_ino)

    def _revert_one(self, fd, st_dev, st_ino):
        """
        Close `fd` if it remains open and has not been recycled for another
        file since being recorded.
        """
        try:
            stat = os.fstat(fd)
        except OSError:
            # Descriptor was closed by the module itself; nothing leaked.
            return

        if (stat.st_dev, stat.st_ino) != (st_dev, st_ino):
            # Descriptor number was reused for an unrelated file.
            return

        LOG.info("a tempfile.mkstemp() FD was leaked during the last task")
        os.close(fd)
+
+
class EnvironmentFileWatcher(object):
    """
    Usually Ansible edits to /etc/environment and ~/.pam_environment are
    reflected in subsequent tasks if become:true or SSH multiplexing is
    disabled, due to sudo and/or SSH reinvoking pam_env. Rather than emulate
    existing semantics, do our best to ensure edits are always reflected.

    This can't perfectly replicate the existing behaviour, but it can safely
    update and remove keys that appear to originate in `path`, and that do not
    conflict with any existing environment key inherited from elsewhere.

    A more robust future approach may simply be to arrange for the persistent
    interpreter to restart when a change is detected.
    """
    # We know nothing about the character set of /etc/environment or the
    # process environment.
    environ = getattr(os, 'environb', os.environ)

    def __init__(self, path):
        """
        :param str path:
            Path of the environment file to watch; "~" is expanded.
        """
        self.path = os.path.expanduser(path)
        #: Inode data at time of last check.
        self._st = self._stat()
        #: List of inherited keys appearing to originate from this file.
        self._keys = [
            key for key, value in self._load()
            if value == self.environ.get(key)
        ]
        LOG.debug('%r installed; existing keys: %r', self, self._keys)

    def __repr__(self):
        return 'EnvironmentFileWatcher(%r)' % (self.path,)

    def _stat(self):
        """
        Return os.stat() for the watched path, or None if it cannot be
        stat()ed (e.g. it does not exist).
        """
        try:
            return os.stat(self.path)
        except OSError:
            return None

    def _load(self):
        """
        Return the parsed (key, value) pairs from the file, or an empty list
        when the file cannot be opened.
        """
        try:
            fp = open(self.path, 'rb')
            try:
                return list(self._parse(fp))
            finally:
                fp.close()
        except IOError:
            return []

    def _parse(self, fp):
        """
        Generate (key, value) byte pairs from an open pam_env style file; see
        linux-pam-1.3.1/modules/pam_env/pam_env.c#L207
        """
        for line in fp:
            # '   #export foo=some var  ' -> ['#export', 'foo=some var  ']
            bits = shlex_split_b(line)
            if (not bits) or bits[0].startswith(b('#')):
                continue

            # Tolerate an optional leading "export" keyword.
            if bits[0] == b('export'):
                bits.pop(0)

            key, sep, value = bytes_partition(b(' ').join(bits), b('='))
            if key and sep:
                yield key, value

    def _on_file_changed(self):
        """
        Apply each key from the reloaded file that does not conflict with an
        existing environment key, recording it for later removal.
        """
        LOG.debug('%r: file changed, reloading', self)
        for key, value in self._load():
            if key in self.environ:
                LOG.debug('%r: existing key %r=%r exists, not setting %r',
                          self, key, self.environ[key], value)
            else:
                LOG.debug('%r: setting key %r to %r', self, key, value)
                self._keys.append(key)
                self.environ[key] = value

    def _remove_existing(self):
        """
        When a change is detected, remove keys that existed in the old file.
        """
        for key in self._keys:
            if key in self.environ:
                LOG.debug('%r: removing old key %r', self, key)
                del self.environ[key]
        self._keys = []

    def check(self):
        """
        Compare the :func:`os.stat` for the pam_env style environment file
        `path` with the previous result `old_st`, which may be :data:`None` if
        the previous stat attempt failed. Reload its contents if the file has
        changed or appeared since last attempt.

        :returns:
            New :func:`os.stat` result. The new call to :func:`reload_env` should
            pass it as the value of `old_st`.
        """
        st = self._stat()
        if self._st == st:
            # Unchanged (or still missing); nothing to do.
            return

        self._st = st
        self._remove_existing()

        if st is None:
            LOG.debug('%r: file has disappeared', self)
        else:
            self._on_file_changed()
+
+_pam_env_watcher = EnvironmentFileWatcher('~/.pam_environment')
+_etc_env_watcher = EnvironmentFileWatcher('/etc/environment')
+
+
def utf8(s):
    """
    Coerce an object to bytes if it is Unicode.
    """
    if isinstance(s, mitogen.core.UnicodeType):
        return s.encode('utf-8')
    return s
+
+
def reopen_readonly(fp):
    """
    Replace the file descriptor belonging to the file object `fp` with one
    open on the same file (`fp.name`), but opened with :py:data:`os.O_RDONLY`.
    This enables temporary files to be executed on Linux, which usually throws
    ``ETXTBUSY`` if any writeable handle exists pointing to a file passed to
    `execve()`.

    :param fp:
        File object exposing ``name`` and ``fileno()``.
    """
    fd = os.open(fp.name, os.O_RDONLY)
    try:
        # dup2() atomically replaces fp's descriptor slot with the read-only
        # descriptor, closing the old (writeable) one in the process.
        os.dup2(fd, fp.fileno())
    finally:
        # Fix: close the temporary descriptor even if dup2() raises, so it
        # cannot leak on the error path.
        os.close(fd)
+
+
+class Runner(object):
+    """
+    Ansible module runner. After instantiation (with kwargs supplied by the
+    corresponding Planner), `.run()` is invoked, upon which `setup()`,
+    `_run()`, and `revert()` are invoked, with the return value of `_run()`
+    returned by `run()`.
+
+    Subclasses may override `_run()` and extend `setup()` and `revert()`.
+
+    :param str module:
+        Name of the module to execute, e.g. "shell"
+    :param mitogen.core.Context service_context:
+        Context to which we should direct FileService calls. For now, always
+        the connection multiplexer process on the controller.
+    :param str json_args:
+        Ansible module arguments. A mixture of user and internal keys created
+        by :meth:`ansible.plugins.action.ActionBase._execute_module`.
+
+        This is passed as a string rather than a dict in order to mimic the
+        implicit bytes/str conversion behaviour of a 2.x controller running
+        against a 3.x target.
+    :param str good_temp_dir:
+        The writeable temporary directory for this user account reported by
+        :func:`ansible_mitogen.target.init_child` passed via the controller.
+        This is specified explicitly to remain compatible with Ansible<2.5, and
+        for forked tasks where init_child never runs.
+    :param dict env:
+        Additional environment variables to set during the run. Keys with
+        :data:`None` are unset if present.
+    :param str cwd:
+        If not :data:`None`, change to this directory before executing.
+    :param mitogen.core.ExternalContext econtext:
+        When `detach` is :data:`True`, a reference to the ExternalContext the
+        runner is executing in.
+    :param bool detach:
+        When :data:`True`, indicate the runner should detach the context from
+        its parent after setup has completed successfully.
+    """
+    def __init__(self, module, service_context, json_args, good_temp_dir,
+                 extra_env=None, cwd=None, env=None, econtext=None,
+                 detach=False):
+        self.module = module
+        self.service_context = service_context
+        self.econtext = econtext
+        self.detach = detach
+        # json_args arrives as a JSON string (see class docstring); decode it
+        # so subclasses can consume self.args as a dict.
+        self.args = json.loads(mitogen.core.to_text(json_args))
+        self.good_temp_dir = good_temp_dir
+        self.extra_env = extra_env
+        self.env = env
+        self.cwd = cwd
+        #: If not :data:`None`, :meth:`get_temp_dir` had to create a temporary
+        #: directory for this run, because we're in an asynchronous task, or
+        #: because the originating action did not create a directory.
+        self._temp_dir = None
+
+    def get_temp_dir(self):
+        """
+        Return the temporary directory for this invocation: the one the
+        controller already created (`_ansible_tmpdir`) when present, otherwise
+        a lazily created private directory under :attr:`good_temp_dir`.
+        """
+        path = self.args.get('_ansible_tmpdir')
+        if path is not None:
+            return path
+
+        if self._temp_dir is None:
+            self._temp_dir = tempfile.mkdtemp(
+                prefix='ansible_mitogen_runner_',
+                dir=self.good_temp_dir,
+            )
+
+        return self._temp_dir
+
+    def revert_temp_dir(self):
+        """
+        Delete (via :func:`ansible_mitogen.target.prune_tree`) any temporary
+        directory :meth:`get_temp_dir` created for this run, and forget it.
+        """
+        if self._temp_dir is not None:
+            ansible_mitogen.target.prune_tree(self._temp_dir)
+            self._temp_dir = None
+
+    def setup(self):
+        """
+        Prepare for running a module, including fetching necessary dependencies
+        from the parent, as :meth:`run` may detach prior to beginning
+        execution. The base implementation simply prepares the environment.
+        """
+        self._setup_cwd()
+        self._setup_environ()
+
+    def _setup_cwd(self):
+        """
+        For situations like sudo to a non-privileged account, CWD could be
+        $HOME of the old account, which could have mode go=, which means it is
+        impossible to restore the old directory, so don't even try.
+        """
+        if self.cwd:
+            os.chdir(self.cwd)
+
+    def _setup_environ(self):
+        """
+        Apply changes from /etc/environment files before creating a
+        TemporaryEnvironment to snapshot environment state prior to module run.
+        """
+        _pam_env_watcher.check()
+        _etc_env_watcher.check()
+        # extra_env is applied first so explicit task-level env wins.
+        env = dict(self.extra_env or {})
+        if self.env:
+            env.update(self.env)
+        self._env = TemporaryEnvironment(env)
+
+    def _revert_cwd(self):
+        """
+        #591: make a best-effort attempt to return to :attr:`good_temp_dir`.
+        """
+        try:
+            os.chdir(self.good_temp_dir)
+        except OSError:
+            LOG.debug('%r: could not restore CWD to %r',
+                      self, self.good_temp_dir)
+
+    def revert(self):
+        """
+        Revert any changes made to the process after running a module. The base
+        implementation simply restores the original environment.
+        """
+        self._revert_cwd()
+        self._env.revert()
+        self.revert_temp_dir()
+
+    def _run(self):
+        """
+        The _run() method is expected to return a dictionary in the form of
+        ActionBase._low_level_execute_command() output, i.e. having::
+
+            {
+                "rc": int,
+                "stdout": "stdout data",
+                "stderr": "stderr data"
+            }
+        """
+        raise NotImplementedError()
+
+    def run(self):
+        """
+        Set up the process environment in preparation for running an Ansible
+        module. This monkey-patches the Ansible libraries in various places to
+        prevent it from trying to kill the process on completion, and to
+        prevent it from reading sys.stdin.
+
+        :returns:
+            Module result dictionary.
+        """
+        self.setup()
+        if self.detach:
+            self.econtext.detach()
+
+        try:
+            return self._run()
+        finally:
+            self.revert()
+
+
+class AtExitWrapper(object):
+    """
+    issue #397, #454: Newer Ansibles use :func:`atexit.register` to trigger
+    tmpdir cleanup when AnsibleModule.tmpdir is responsible for creating its
+    own temporary directory, however with Mitogen processes are preserved
+    across tasks, meaning cleanup must happen earlier.
+
+    Patch :func:`atexit.register`, catching :func:`shutil.rmtree` calls so they
+    can be executed on task completion, rather than on process shutdown.
+    """
+    # Wrapped in a dict to avoid instance method decoration.
+    original = {
+        'register': atexit.register
+    }
+
+    def __init__(self):
+        assert atexit.register == self.original['register'], \
+            "AtExitWrapper installed twice."
+        atexit.register = self._atexit__register
+        #: Deferred (func, args, kwargs) tuples captured from rmtree calls.
+        self.deferred = []
+
+    def revert(self):
+        """
+        Restore the original :func:`atexit.register`.
+        """
+        assert atexit.register == self._atexit__register, \
+            "AtExitWrapper not installed."
+        atexit.register = self.original['register']
+
+    def run_callbacks(self):
+        """
+        Execute every deferred :func:`shutil.rmtree` call, most recently
+        registered first, logging rather than propagating failures.
+        """
+        while self.deferred:
+            func, targs, kwargs = self.deferred.pop()
+            try:
+                func(*targs, **kwargs)
+            except Exception:
+                LOG.exception('While running atexit callbacks')
+
+    def _atexit__register(self, func, *targs, **kwargs):
+        """
+        Intercept :func:`atexit.register` calls, diverting any to
+        :func:`shutil.rmtree` into a private list.
+        """
+        if func == shutil.rmtree:
+            self.deferred.append((func, targs, kwargs))
+            return
+
+        self.original['register'](func, *targs, **kwargs)
+
+
+class ModuleUtilsImporter(object):
+    """
+    PEP 302 style meta_path importer serving module_utils files fetched from
+    the master; installs itself on construction.
+
+    :param mitogen.core.Context context:
+        Context used to fetch file contents via
+        :func:`ansible_mitogen.target.get_small_file`.
+    :param list module_utils:
+        List of `(fullname, path, is_pkg)` tuples.
+    """
+    def __init__(self, context, module_utils):
+        self._context = context
+        self._by_fullname = dict(
+            (fullname, (path, is_pkg))
+            for fullname, path, is_pkg in module_utils
+        )
+        self._loaded = set()
+        sys.meta_path.insert(0, self)
+
+    def revert(self):
+        """
+        Uninstall the importer and purge every module it loaded from
+        :data:`sys.modules`.
+        """
+        sys.meta_path.remove(self)
+        for fullname in self._loaded:
+            sys.modules.pop(fullname, None)
+
+    def find_module(self, fullname, path=None):
+        """
+        Claim only the fullnames supplied at construction time.
+        """
+        if fullname in self._by_fullname:
+            return self
+
+    def load_module(self, fullname):
+        """
+        Fetch, compile and execute the module's source from the master.
+        """
+        path, is_pkg = self._by_fullname[fullname]
+        source = ansible_mitogen.target.get_small_file(self._context, path)
+        code = compile(source, path, 'exec', 0, 1)
+        # imp.new_module is legacy API kept for Python 2 compatibility --
+        # NOTE(review): imp is removed in recent Py3; confirm supported range.
+        mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
+        mod.__file__ = "master:%s" % (path,)
+        mod.__loader__ = self
+        if is_pkg:
+            mod.__path__ = []
+            mod.__package__ = str(fullname)
+        else:
+            mod.__package__ = str(str_rpartition(to_text(fullname), '.')[0])
+        exec(code, mod.__dict__)
+        self._loaded.add(fullname)
+        return mod
+
+
+class TemporaryEnvironment(object):
+    """
+    Apply environment changes from `env` until :meth:`revert` is called. Values
+    in the dict may be :data:`None` to indicate the relevant key should be
+    deleted.
+    """
+    def __init__(self, env=None):
+        self.original = dict(os.environ)
+        self.env = env or {}
+        for key, value in iteritems(self.env):
+            key = mitogen.core.to_text(key)
+            value = mitogen.core.to_text(value)
+            if value is None:
+                os.environ.pop(key, None)
+            else:
+                os.environ[key] = str(value)
+
+    def revert(self):
+        """
+        Revert changes made by the module to the process environment. This must
+        always run, as some modules (e.g. git.py) set variables like GIT_SSH
+        that must be cleared out between runs.
+        """
+        os.environ.clear()
+        os.environ.update(self.original)
+
+
+class TemporaryArgv(object):
+    """
+    Replace :data:`sys.argv` with `argv` (each element coerced via ``str``)
+    until :meth:`revert` is called.
+    """
+    def __init__(self, argv):
+        self.original = sys.argv[:]
+        sys.argv[:] = map(str, argv)
+
+    def revert(self):
+        """
+        Restore the original :data:`sys.argv`.
+        """
+        sys.argv[:] = self.original
+
+
+class NewStyleStdio(object):
+    """
+    Patch ansible.module_utils.basic argument globals.
+
+    Replaces sys.stdout/stderr with StringIO buffers (read back later by
+    :meth:`NewStyleRunner._run`), supplies the module arguments via both the
+    ``_ANSIBLE_ARGS`` global and a replacement stdin, and points
+    ``get_module_path`` at `temp_dir`.
+    """
+    def __init__(self, args, temp_dir):
+        self.temp_dir = temp_dir
+        self.original_stdout = sys.stdout
+        self.original_stderr = sys.stderr
+        self.original_stdin = sys.stdin
+        sys.stdout = StringIO()
+        sys.stderr = StringIO()
+        encoded = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+        ansible.module_utils.basic._ANSIBLE_ARGS = utf8(encoded)
+        sys.stdin = StringIO(mitogen.core.to_text(encoded))
+
+        self.original_get_path = getattr(ansible.module_utils.basic,
+                                        'get_module_path', None)
+        ansible.module_utils.basic.get_module_path = self._get_path
+
+    def _get_path(self):
+        return self.temp_dir
+
+    def revert(self):
+        """
+        Restore the original stdio objects, and reset the argument global so
+        stale arguments cannot leak into a later task.
+        """
+        ansible.module_utils.basic.get_module_path = self.original_get_path
+        sys.stdout = self.original_stdout
+        sys.stderr = self.original_stderr
+        sys.stdin = self.original_stdin
+        ansible.module_utils.basic._ANSIBLE_ARGS = '{}'
+
+
+class ProgramRunner(Runner):
+    """
+    Base class for runners that run external programs.
+
+    :param str path:
+        Absolute path to the program file on the master, as it can be retrieved
+        via :class:`mitogen.service.FileService`.
+    :param bool emulate_tty:
+        If :data:`True`, execute the program with `stdout` and `stderr` merged
+        into a single pipe, emulating Ansible behaviour when an SSH TTY is in
+        use.
+    """
+    def __init__(self, path, emulate_tty=None, **kwargs):
+        super(ProgramRunner, self).__init__(**kwargs)
+        self.emulate_tty = emulate_tty
+        self.path = path
+
+    def setup(self):
+        super(ProgramRunner, self).setup()
+        self._setup_program()
+
+    def _get_program_filename(self):
+        """
+        Return the filename used for program on disk. Ansible uses the original
+        filename for non-Ansiballz runs, and ``"ansible_module_" + filename``
+        for Ansiballz runs.
+        """
+        return os.path.basename(self.path)
+
+    #: File object for the program written to disk; :data:`None` until
+    #: :meth:`_setup_program` runs, so :meth:`revert` is safe regardless.
+    program_fp = None
+
+    def _setup_program(self):
+        """
+        Create a temporary file containing the program code. The code is
+        fetched via :meth:`_get_program`.
+        """
+        filename = self._get_program_filename()
+        path = os.path.join(self.get_temp_dir(), filename)
+        self.program_fp = open(path, 'wb')
+        self.program_fp.write(self._get_program())
+        self.program_fp.flush()
+        # Owner-only rwx so the program can be executed, then drop the
+        # writeable handle to avoid ETXTBSY (see reopen_readonly()).
+        os.chmod(self.program_fp.name, int('0700', 8))
+        reopen_readonly(self.program_fp)
+
+    def _get_program(self):
+        """
+        Fetch the module binary from the master if necessary.
+        """
+        return ansible_mitogen.target.get_small_file(
+            context=self.service_context,
+            path=self.path,
+        )
+
+    def _get_program_args(self):
+        """
+        Return any arguments to pass to the program.
+        """
+        return []
+
+    def revert(self):
+        """
+        Delete the temporary program file.
+        """
+        if self.program_fp:
+            self.program_fp.close()
+        super(ProgramRunner, self).revert()
+
+    def _get_argv(self):
+        """
+        Return the final argument vector used to execute the program.
+        """
+        return [
+            self.args.get('_ansible_shell_executable', '/bin/sh'),
+            '-c',
+            self._get_shell_fragment(),
+        ]
+
+    def _get_shell_fragment(self):
+        return "%s %s" % (
+            shlex_quote(self.program_fp.name),
+            ' '.join(map(shlex_quote, self._get_program_args())),
+        )
+
+    def _run(self):
+        try:
+            rc, stdout, stderr = ansible_mitogen.target.exec_args(
+                args=self._get_argv(),
+                emulate_tty=self.emulate_tty,
+            )
+        except Exception:
+            LOG.exception('While running %s', self._get_argv())
+            e = sys.exc_info()[1]
+            # Report execution failure as an ordinary failed-module result
+            # rather than crashing the task.
+            return {
+                u'rc': 1,
+                u'stdout': u'',
+                u'stderr': u'%s: %s' % (type(e), e),
+            }
+
+        return {
+            u'rc': rc,
+            u'stdout': mitogen.core.to_text(stdout),
+            u'stderr': mitogen.core.to_text(stderr),
+        }
+
+
+class ArgsFileRunner(Runner):
+    def setup(self):
+        super(ArgsFileRunner, self).setup()
+        self._setup_args()
+
+    def _setup_args(self):
+        """
+        Create a temporary file containing the module's arguments. The
+        arguments are formatted via :meth:`_get_args`.
+        """
+        self.args_fp = tempfile.NamedTemporaryFile(
+            prefix='ansible_mitogen',
+            suffix='-args',
+            dir=self.get_temp_dir(),
+        )
+        self.args_fp.write(utf8(self._get_args_contents()))
+        self.args_fp.flush()
+        reopen_readonly(self.program_fp)
+
+    def _get_args_contents(self):
+        """
+        Return the module arguments formatted as JSON.
+        """
+        return json.dumps(self.args)
+
+    def _get_program_args(self):
+        return [self.args_fp.name]
+
+    def revert(self):
+        """
+        Delete the temporary argument file.
+        """
+        self.args_fp.close()
+        super(ArgsFileRunner, self).revert()
+
+
+class BinaryRunner(ArgsFileRunner, ProgramRunner):
+    """
+    Execute a binary module unmodified, passing the path of a JSON arguments
+    file as its sole argument (behaviour combined from the two bases).
+    """
+    pass
+
+
+class ScriptRunner(ProgramRunner):
+    """
+    Run a script module whose hashbang line is rewritten from
+    `interpreter_fragment` before upload, with execution performed via
+    /bin/sh (see :meth:`_get_shell_fragment`).
+
+    :param str interpreter_fragment:
+        Shell fragment (the ansible_*_interpreter value) used as the rewritten
+        hashbang and prefixed to the invocation.
+    :param bool is_python:
+        If :data:`True`, insert a UTF-8 coding cookie after the hashbang.
+    """
+    def __init__(self, interpreter_fragment, is_python, **kwargs):
+        super(ScriptRunner, self).__init__(**kwargs)
+        self.interpreter_fragment = interpreter_fragment
+        self.is_python = is_python
+
+    #: Coding cookie inserted for Python scripts by :meth:`_rewrite_source`.
+    b_ENCODING_STRING = b('# -*- coding: utf-8 -*-')
+
+    def _get_program(self):
+        return self._rewrite_source(
+            super(ScriptRunner, self)._get_program()
+        )
+
+    def _get_argv(self):
+        return [
+            self.args.get('_ansible_shell_executable', '/bin/sh'),
+            '-c',
+            self._get_shell_fragment(),
+        ]
+
+    def _get_shell_fragment(self):
+        """
+        Scripts are eligible for having their hashbang line rewritten, and to
+        be executed via /bin/sh using the ansible_*_interpreter value used as a
+        shell fragment prefixing to the invocation.
+        """
+        return "%s %s %s" % (
+            self.interpreter_fragment,
+            shlex_quote(self.program_fp.name),
+            ' '.join(map(shlex_quote, self._get_program_args())),
+        )
+
+    def _rewrite_source(self, s):
+        """
+        Mutate the source according to the per-task parameters.
+        """
+        # While Ansible rewrites the #! using ansible_*_interpreter, it is
+        # never actually used to execute the script, instead it is a shell
+        # fragment consumed by shell/__init__.py::build_module_command().
+        new = [b('#!') + utf8(self.interpreter_fragment)]
+        if self.is_python:
+            new.append(self.b_ENCODING_STRING)
+
+        # Drop the original first (hashbang) line, keeping everything after.
+        _, _, rest = bytes_partition(s, b('\n'))
+        new.append(rest)
+        return b('\n').join(new)
+
+
+class NewStyleRunner(ScriptRunner):
+    """
+    Execute a new-style Ansible module, where Module Replacer-related tricks
+    aren't required.
+
+    :param dict module_map:
+        Mapping with 'custom' (list of `(fullname, path, is_pkg)` tuples) and
+        'builtin' (list of fullnames) describing the module_utils needed by
+        the module.
+    :param str py_module_name:
+        Fully-qualified Python name of the module, used to derive
+        ``__package__`` (see :meth:`_get_module_package`).
+    """
+    #: path => compiled code object for the module, shared process-wide.
+    _code_by_path = {}
+
+    def __init__(self, module_map, py_module_name, **kwargs):
+        super(NewStyleRunner, self).__init__(**kwargs)
+        self.module_map = module_map
+        self.py_module_name = py_module_name
+
+    def _setup_imports(self):
+        """
+        Ensure the local importer and PushFileService has everything for the
+        Ansible module before setup() completes, but before detach() is called
+        in an asynchronous task.
+
+        The master automatically streams modules towards us concurrent to the
+        runner invocation, however there is no public API to synchronize on the
+        completion of those preloads. Instead simply reuse the importer's
+        synchronization mechanism by importing everything the module will need
+        prior to detaching.
+        """
+        for fullname, _, _ in self.module_map['custom']:
+            mitogen.core.import_module(fullname)
+        for fullname in self.module_map['builtin']:
+            try:
+                mitogen.core.import_module(fullname)
+            except ImportError:
+                # #590: Ansible 2.8 module_utils.distro is a package that
+                # replaces itself in sys.modules with a non-package during
+                # import. Prior to replacement, it is a real package containing
+                # a '_distro' submodule which is used on 2.x. Given a 2.x
+                # controller and 3.x target, the import hook never needs to run
+                # again before this replacement occurs, and 'distro' is
+                # replaced with a module from the stdlib. In this case as this
+                # loop progresses to the next entry and attempts to preload
+                # 'distro._distro', the import mechanism will fail. So here we
+                # silently ignore any failure for it.
+                if fullname != 'ansible.module_utils.distro._distro':
+                    raise
+
+    def _setup_excepthook(self):
+        """
+        Starting with Ansible 2.6, some modules (file.py) install a
+        sys.excepthook and never clean it up. So we must preserve the original
+        excepthook and restore it after the run completes.
+        """
+        self.original_excepthook = sys.excepthook
+
+    def setup(self):
+        super(NewStyleRunner, self).setup()
+
+        self._stdio = NewStyleStdio(self.args, self.get_temp_dir())
+        # It is possible that not supplying the script filename will break some
+        # module, but this has never been a bug report. Instead act like an
+        # interpreter that had its script piped on stdin.
+        self._argv = TemporaryArgv([''])
+        self._temp_watcher = TempFileWatcher()
+        self._importer = ModuleUtilsImporter(
+            context=self.service_context,
+            module_utils=self.module_map['custom'],
+        )
+        self._setup_imports()
+        self._setup_excepthook()
+        self.atexit_wrapper = AtExitWrapper()
+        # Reset libc resolver state if the res_init() binding was available
+        # at import time (defined elsewhere in this file).
+        if libc__res_init:
+            libc__res_init()
+
+    def _revert_excepthook(self):
+        sys.excepthook = self.original_excepthook
+
+    def revert(self):
+        self.atexit_wrapper.revert()
+        self._temp_watcher.revert()
+        self._argv.revert()
+        self._stdio.revert()
+        self._revert_excepthook()
+        super(NewStyleRunner, self).revert()
+
+    def _get_program_filename(self):
+        """
+        See ProgramRunner._get_program_filename().
+        """
+        return 'ansible_module_' + os.path.basename(self.path)
+
+    def _setup_args(self):
+        # Arguments are delivered via _ANSIBLE_ARGS/stdin (NewStyleStdio), so
+        # no arguments file is required.
+        pass
+
+    # issue #555: in old times it was considered good form to reload sys and
+    # change the default encoding. This hack was removed from Ansible long ago,
+    # but not before permeating into many third party modules.
+    PREHISTORIC_HACK_RE = re.compile(
+        b(r'reload\s*\(\s*sys\s*\)\s*'
+          r'sys\s*\.\s*setdefaultencoding\([^)]+\)')
+    )
+
+    def _setup_program(self):
+        source = ansible_mitogen.target.get_small_file(
+            context=self.service_context,
+            path=self.path,
+        )
+        self.source = self.PREHISTORIC_HACK_RE.sub(b(''), source)
+
+    def _get_code(self):
+        """
+        Compile the module source, caching the resulting code object by path
+        for reuse across invocations in this process.
+        """
+        try:
+            return self._code_by_path[self.path]
+        except KeyError:
+            return self._code_by_path.setdefault(self.path, compile(
+                # Py2.4 doesn't support kwargs.
+                self.source,            # source
+                "master:" + self.path,  # filename
+                'exec',                 # mode
+                0,                      # flags
+                True,                   # dont_inherit
+            ))
+
+    # Modules execute under the name '__main__'; native str on either major
+    # version.
+    if mitogen.core.PY3:
+        main_module_name = '__main__'
+    else:
+        main_module_name = b('__main__')
+
+    def _handle_magic_exception(self, mod, exc):
+        """
+        Beginning with Ansible >2.6, some modules (file.py) install a
+        sys.excepthook which is a closure over AnsibleModule, redirecting the
+        magical exception to AnsibleModule.fail_json().
+
+        For extra special needs bonus points, the class is not defined in
+        module_utils, but is defined in the module itself, meaning there is no
+        type for isinstance() that outlasts the invocation.
+        """
+        klass = getattr(mod, 'AnsibleModuleError', None)
+        if klass and isinstance(exc, klass):
+            mod.module.fail_json(**exc.results)
+
+    def _run_code(self, code, mod):
+        try:
+            if mitogen.core.PY3:
+                exec(code, vars(mod))
+            else:
+                # Py2 'exec ... in ...' statement form, concealed in a string
+                # so the Py3 parser never sees it.
+                exec('exec code in vars(mod)')
+        except Exception:
+            self._handle_magic_exception(mod, sys.exc_info()[1])
+            raise
+
+    def _get_module_package(self):
+        """
+        Since Ansible 2.9 __package__ must be set in accordance with an
+        approximation of the original package hierarchy, so that relative
+        imports function correctly.
+        """
+        pkg, sep, modname = str_rpartition(self.py_module_name, '.')
+        if not sep:
+            return None
+        if mitogen.core.PY3:
+            return pkg
+        return pkg.encode()
+
+    def _run(self):
+        mod = types.ModuleType(self.main_module_name)
+        mod.__package__ = self._get_module_package()
+        # Some Ansible modules use __file__ to find the Ansiballz temporary
+        # directory. We must provide some temporary path in __file__, but we
+        # don't want to pointlessly write the module to disk when it never
+        # actually needs to exist. So just pass the filename as it would exist.
+        mod.__file__ = os.path.join(
+            self.get_temp_dir(),
+            'ansible_module_' + os.path.basename(self.path),
+        )
+
+        code = self._get_code()
+        # rc stays 2 only when the module returns without calling sys.exit().
+        rc = 2
+        try:
+            try:
+                self._run_code(code, mod)
+            except SystemExit:
+                exc = sys.exc_info()[1]
+                rc = exc.args[0]
+            except Exception:
+                # This writes to stderr by default.
+                traceback.print_exc()
+                rc = 1
+
+        finally:
+            self.atexit_wrapper.run_callbacks()
+
+        return {
+            u'rc': rc,
+            u'stdout': mitogen.core.to_text(sys.stdout.getvalue()),
+            u'stderr': mitogen.core.to_text(sys.stderr.getvalue()),
+        }
+
+
+class JsonArgsRunner(ScriptRunner):
+    """
+    Execute a JSONARGS-style module: the literal JSON_ARGS placeholder in its
+    source is replaced with the task arguments serialized as JSON.
+    """
+    JSON_ARGS = b('<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>')
+
+    def _get_args_contents(self):
+        return json.dumps(self.args).encode()
+
+    def _rewrite_source(self, s):
+        # Perform the usual hashbang rewrite, then substitute the arguments.
+        return (
+            super(JsonArgsRunner, self)._rewrite_source(s)
+            .replace(self.JSON_ARGS, self._get_args_contents())
+        )
+
+
+class WantJsonRunner(ArgsFileRunner, ScriptRunner):
+    """
+    Execute a WANT_JSON-style script module: arguments are supplied as the
+    path of a JSON file (via ArgsFileRunner).
+    """
+    pass
+
+
+class OldStyleRunner(ArgsFileRunner, ScriptRunner):
+    """
+    Execute an old-style module, which receives its arguments as a
+    ``key=value`` string in a file.
+    """
+    def _get_args_contents(self):
+        """
+        Mimic the argument formatting behaviour of
+        ActionBase._execute_module().
+        """
+        return ' '.join(
+            '%s=%s' % (key, shlex_quote(str(self.args[key])))
+            for key in self.args
+        ) + ' '  # Bug-for-bug :(
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/services.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/services.py
new file mode 100644
index 0000000000000000000000000000000000000000..52171903da5887f70fa157b9f9a459f0c9fa47db
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/services.py
@@ -0,0 +1,559 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Classes in this file define Mitogen 'services' that run (initially) within the
+connection multiplexer process that is forked off the top-level controller
+process.
+
+Once a worker process connects to a multiplexer process
+(Connection._connect()), it communicates with these services to establish new
+connections, grant access to files by children, and register for notification
+when a child has completed a job.
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import os
+import os.path
+import sys
+import threading
+
+import ansible.constants
+
+import mitogen
+import mitogen.service
+import mitogen.utils
+import ansible_mitogen.loaders
+import ansible_mitogen.module_finder
+import ansible_mitogen.target
+
+
+LOG = logging.getLogger(__name__)
+
+# Force load of plugin to ensure ConfigManager has definitions loaded. Done
+# during module import to ensure a single-threaded environment; PluginLoader
+# is not thread-safe.
+# The 'sh' plugin's options are later consulted by _get_candidate_temp_dirs().
+ansible_mitogen.loaders.shell_loader.get('sh')
+
+
+if sys.version_info[0] == 3:
+    def reraise(tp, value, tb):
+        """
+        Re-raise `value` of type `tp`, preserving traceback `tb` (Py3 form).
+        """
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+else:
+    # The three-argument raise statement is a syntax error under Py3, so it
+    # must be concealed inside exec().
+    exec(
+        "def reraise(tp, value, tb=None):\n"
+        "    raise tp, value, tb\n"
+     )
+
+
+def _get_candidate_temp_dirs():
+    """
+    Return the configured remote_tmp followed by system_tmpdirs for the 'sh'
+    shell plugin, passed through mitogen.utils.cast for transmission.
+    """
+    try:
+        # >=2.5
+        options = ansible.constants.config.get_plugin_options('shell', 'sh')
+        remote_tmp = options.get('remote_tmp') or ansible.constants.DEFAULT_REMOTE_TMP
+        system_tmpdirs = options.get('system_tmpdirs', ('/var/tmp', '/tmp'))
+    except AttributeError:
+        # 2.3
+        remote_tmp = ansible.constants.DEFAULT_REMOTE_TMP
+        system_tmpdirs = ('/var/tmp', '/tmp')
+
+    return mitogen.utils.cast([remote_tmp] + list(system_tmpdirs))
+
+
+def key_from_dict(**kwargs):
+    """
+    Return a unique string representation of a dict as quickly as possible.
+    Used to generate deduplication keys from a request.
+    """
+    out = []
+    # Iterative depth-first walk; dict items are sorted so logically equal
+    # dicts always yield the same key.
+    stack = [kwargs]
+    while stack:
+        obj = stack.pop()
+        if isinstance(obj, dict):
+            stack.extend(sorted(obj.items()))
+        elif isinstance(obj, (list, tuple)):
+            stack.extend(obj)
+        else:
+            out.append(str(obj))
+    return ''.join(out)
+
+
+class Error(Exception):
+    """
+    Base exception for this module.
+    """
+    pass
+
+
+class ContextService(mitogen.service.Service):
+    """
+    Used by workers to fetch the single Context instance corresponding to a
+    connection configuration, creating the matching connection if it does not
+    exist.
+
+    For connection methods and their parameters, see:
+        https://mitogen.readthedocs.io/en/latest/api.html#context-factories
+
+    This concentrates connections in the top-level process, which may become a
+    bottleneck. The bottleneck can be removed using per-CPU connection
+    processes and arranging for the worker to select one according to a hash of
+    the connection parameters (sharding).
+    """
+    max_interpreters = int(os.getenv('MITOGEN_MAX_INTERPRETERS', '20'))
+
+    def __init__(self, *args, **kwargs):
+        super(ContextService, self).__init__(*args, **kwargs)
+        #: Serializes access to the bookkeeping maps below.
+        self._lock = threading.Lock()
+        #: Records the :meth:`get` result dict for successful calls, returned
+        #: for identical subsequent calls. Keyed by :meth:`key_from_dict`.
+        self._response_by_key = {}
+        #: List of :class:`mitogen.core.Latch` awaiting the result for a
+        #: particular key.
+        self._latches_by_key = {}
+        #: Mapping of :class:`mitogen.core.Context` -> reference count. Each
+        #: call to :meth:`get` increases this by one. Calls to :meth:`put`
+        #: decrease it by one.
+        self._refs_by_context = {}
+        #: List of contexts in creation order by via= parameter. When
+        #: :attr:`max_interpreters` is reached, the most recently used context
+        #: is destroyed to make room for any additional context.
+        self._lru_by_via = {}
+        #: :func:`key_from_dict` result by Context.
+        self._key_by_context = {}
+        #: Mapping of Context -> parent Context
+        self._via_by_context = {}
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'stack': list,
+    })
+    def reset(self, stack):
+        """
+        Return a reference, forcing close and discard of the underlying
+        connection. Used for 'meta: reset_connection' or when some other error
+        is detected.
+
+        :returns:
+            :data:`True` if a connection was found to discard, otherwise
+            :data:`False`.
+        """
+        LOG.debug('%r.reset(%r)', self, stack)
+
+        l = mitogen.core.Latch()
+        context = None
+        with self._lock:
+            # Walk the via= chain, resolving each hop from the response
+            # cache; 'context' finishes as the final hop's Context.
+            for i, spec in enumerate(stack):
+                key = key_from_dict(via=context, **spec)
+                response = self._response_by_key.get(key)
+                if response is None:
+                    LOG.debug('%r: could not find connection to shut down; '
+                              'failed at hop %d', self, i)
+                    return False
+
+                context = response['context']
+
+            mitogen.core.listen(context, 'disconnect', l.put)
+            self._shutdown_unlocked(context)
+
+        # The timeout below is to turn a hang into a crash in case there is any
+        # possible race between 'disconnect' signal subscription, and the child
+        # abruptly disconnecting.
+        l.get(timeout=30.0)
+        return True
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'context': mitogen.core.Context
+    })
+    def put(self, context):
+        """
+        Return a reference, making it eligible for recycling once its reference
+        count reaches zero.
+        """
+        LOG.debug('decrementing reference count for %r', context)
+        self._lock.acquire()
+        try:
+            if self._refs_by_context.get(context, 0) == 0:
+                LOG.warning('%r.put(%r): refcount was 0. shutdown_all called?',
+                            self, context)
+                return
+            self._refs_by_context[context] -= 1
+        finally:
+            self._lock.release()
+
+    def _produce_response(self, key, response):
+        """
+        Reply to every waiting request matching a configuration key with a
+        response dictionary, deleting the list of waiters when done.
+
+        :param str key:
+            Result of :meth:`key_from_dict`
+        :param dict response:
+            Response dictionary
+        :returns:
+            Number of waiters that were replied to.
+        """
+        self._lock.acquire()
+        try:
+            latches = self._latches_by_key.pop(key)
+            count = len(latches)
+            for latch in latches:
+                latch.put(response)
+        finally:
+            self._lock.release()
+        return count
+
+    def _forget_context_unlocked(self, context):
+        key = self._key_by_context.get(context)
+        if key is None:
+            LOG.debug('%r: attempt to forget unknown %r', self, context)
+            return
+
+        self._response_by_key.pop(key, None)
+        self._latches_by_key.pop(key, None)
+        self._key_by_context.pop(context, None)
+        self._refs_by_context.pop(context, None)
+        self._via_by_context.pop(context, None)
+        self._lru_by_via.pop(context, None)
+
+    def _shutdown_unlocked(self, context, lru=None, new_context=None):
+        """
+        Arrange for `context` to be shut down, and optionally add `new_context`
+        to the LRU list while holding the lock.
+        """
+        LOG.info('%r._shutdown_unlocked(): shutting down %r', self, context)
+        context.shutdown()
+        via = self._via_by_context.get(context)
+        if via:
+            lru = self._lru_by_via.get(via)
+            if lru:
+                if context in lru:
+                    lru.remove(context)
+                if new_context:
+                    lru.append(new_context)
+        self._forget_context_unlocked(context)
+
+    def _update_lru_unlocked(self, new_context, spec, via):
+        """
+        Update the LRU ("MRU"?) list associated with the connection described
+        by `kwargs`, destroying the most recently created context if the list
+        is full. Finally add `new_context` to the list.
+        """
+        self._via_by_context[new_context] = via
+
+        lru = self._lru_by_via.setdefault(via, [])
+        if len(lru) < self.max_interpreters:
+            lru.append(new_context)
+            return
+
+        for context in reversed(lru):
+            if self._refs_by_context[context] == 0:
+                break
+        else:
+            LOG.warning('via=%r reached maximum number of interpreters, '
+                        'but they are all marked as in-use.', via)
+            return
+
+        self._shutdown_unlocked(context, lru=lru, new_context=new_context)
+
+    def _update_lru(self, new_context, spec, via):
+        self._lock.acquire()
+        try:
+            self._update_lru_unlocked(new_context, spec, via)
+        finally:
+            self._lock.release()
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    def dump(self):
+        """
+        For testing, return a list of dicts describing every currently
+        connected context.
+        """
+        return [
+            {
+                'context_name': context.name,
+                'via': getattr(self._via_by_context.get(context),
+                               'name', None),
+                'refs': self._refs_by_context.get(context),
+            }
+            for context, key in sorted(self._key_by_context.items(),
+                                       key=lambda c_k: c_k[0].context_id)
+        ]
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    def shutdown_all(self):
+        """
+        For testing use, arrange for all connections to be shut down.
+        """
+        self._lock.acquire()
+        try:
+            for context in list(self._key_by_context):
+                self._shutdown_unlocked(context)
+        finally:
+            self._lock.release()
+
+    def _on_context_disconnect(self, context):
+        """
+        Respond to Context disconnect event by deleting any record of the no
+        longer reachable context.  This method runs in the Broker thread and
+        must not block.
+        """
+        self._lock.acquire()
+        try:
+            LOG.info('%r: Forgetting %r due to stream disconnect', self, context)
+            self._forget_context_unlocked(context)
+        finally:
+            self._lock.release()
+
+    ALWAYS_PRELOAD = (
+        'ansible.module_utils.basic',
+        'ansible.module_utils.json_utils',
+        'ansible.release',
+        'ansible_mitogen.runner',
+        'ansible_mitogen.target',
+        'mitogen.fork',
+        'mitogen.service',
+    )
+
+    def _send_module_forwards(self, context):
+        if hasattr(self.router.responder, 'forward_modules'):
+            self.router.responder.forward_modules(context, self.ALWAYS_PRELOAD)
+
+    _candidate_temp_dirs = None
+
+    def _get_candidate_temp_dirs(self):
+        """
+        Return a list of locations to try to create the single temporary
+        directory used by the run. This simply caches the (expensive) plugin
+        load of :func:`_get_candidate_temp_dirs`.
+        """
+        if self._candidate_temp_dirs is None:
+            self._candidate_temp_dirs = _get_candidate_temp_dirs()
+        return self._candidate_temp_dirs
+
+    def _connect(self, key, spec, via=None):
+        """
+        Actual connect implementation. Arranges for the Mitogen connection to
+        be created and enqueues an asynchronous call to start the forked task
+        parent in the remote context.
+
+        :param key:
+            Deduplication key representing the connection configuration.
+        :param spec:
+            Connection specification.
+        :returns:
+            Dict like::
+
+                {
+                    'context': mitogen.core.Context or None,
+                    'via': mitogen.core.Context or None,
+                    'init_child_result': {
+                        'fork_context': mitogen.core.Context,
+                        'home_dir': str or None,
+                    },
+                    'msg': str or None
+                }
+
+            Where `context` is a reference to the newly constructed context,
+            `init_child_result` is the result of executing
+            :func:`ansible_mitogen.target.init_child` in that context, `msg` is
+            an error message and the remaining fields are :data:`None`, or
+            `msg` is :data:`None` and the remaining fields are set.
+        """
+        try:
+            method = getattr(self.router, spec['method'])
+        except AttributeError:
+            raise Error('unsupported method: %(method)s' % spec)
+
+        context = method(via=via, unidirectional=True, **spec['kwargs'])
+        if via and spec.get('enable_lru'):
+            self._update_lru(context, spec, via)
+
+        # Forget the context when its disconnect event fires.
+        mitogen.core.listen(context, 'disconnect',
+            lambda: self._on_context_disconnect(context))
+
+        self._send_module_forwards(context)
+        init_child_result = context.call(
+            ansible_mitogen.target.init_child,
+            log_level=LOG.getEffectiveLevel(),
+            candidate_temp_dirs=self._get_candidate_temp_dirs(),
+        )
+
+        if os.environ.get('MITOGEN_DUMP_THREAD_STACKS'):
+            from mitogen import debug
+            context.call(debug.dump_to_logger)
+
+        self._key_by_context[context] = key
+        self._refs_by_context[context] = 0
+        return {
+            'context': context,
+            'via': via,
+            'init_child_result': init_child_result,
+            'msg': None,
+        }
+
+    def _wait_or_start(self, spec, via=None):
+        latch = mitogen.core.Latch()
+        key = key_from_dict(via=via, **spec)
+        self._lock.acquire()
+        try:
+            response = self._response_by_key.get(key)
+            if response is not None:
+                self._refs_by_context[response['context']] += 1
+                latch.put(response)
+                return latch
+
+            latches = self._latches_by_key.setdefault(key, [])
+            first = len(latches) == 0
+            latches.append(latch)
+        finally:
+            self._lock.release()
+
+        if first:
+            # I'm the first requester, so I will create the connection.
+            try:
+                response = self._connect(key, spec, via=via)
+                count = self._produce_response(key, response)
+                # Only record the response for non-error results.
+                self._response_by_key[key] = response
+                # Set the reference count to the number of waiters.
+                self._refs_by_context[response['context']] += count
+            except Exception:
+                self._produce_response(key, sys.exc_info())
+
+        return latch
+
+    disconnect_msg = (
+        'Channel was disconnected while connection attempt was in progress; '
+        'this may be caused by an abnormal Ansible exit, or due to an '
+        'unreliable target.'
+    )
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'stack': list
+    })
+    def get(self, stack):
+        """
+        Return a Context referring to an established connection with the given
+        configuration, establishing new connections as necessary.
+
+        :param list stack:
+            Connection descriptions. Each element is a dict containing 'method'
+            and 'kwargs' keys describing the Router method and arguments.
+            Subsequent elements are proxied via the previous.
+
+        :returns dict:
+            * context: mitogen.parent.Context or None.
+            * init_child_result: Result of :func:`init_child`.
+            * msg: StreamError exception text or None.
+            * method_name: string failing method name.
+        """
+        via = None
+        for spec in stack:
+            try:
+                result = self._wait_or_start(spec, via=via).get()
+                if isinstance(result, tuple):  # exc_info()
+                    reraise(*result)
+                via = result['context']
+            except mitogen.core.ChannelError:
+                return {
+                    'context': None,
+                    'init_child_result': None,
+                    'method_name': spec['method'],
+                    'msg': self.disconnect_msg,
+                }
+            except mitogen.core.StreamError as e:
+                return {
+                    'context': None,
+                    'init_child_result': None,
+                    'method_name': spec['method'],
+                    'msg': str(e),
+                }
+
+        return result
+
+
+class ModuleDepService(mitogen.service.Service):
+    """
+    Scan a new-style module and produce a cached mapping of module_utils names
+    to their resolved filesystem paths.
+    """
+    invoker_class = mitogen.service.SerializedInvoker
+
+    def __init__(self, *args, **kwargs):
+        super(ModuleDepService, self).__init__(*args, **kwargs)
+        self._cache = {}
+
+    def _get_builtin_names(self, builtin_path, resolved):
+        return [
+            mitogen.core.to_text(fullname)
+            for fullname, path, is_pkg in resolved
+            if os.path.abspath(path).startswith(builtin_path)
+        ]
+
+    def _get_custom_tups(self, builtin_path, resolved):
+        return [
+            (mitogen.core.to_text(fullname),
+             mitogen.core.to_text(path),
+             is_pkg)
+            for fullname, path, is_pkg in resolved
+            if not os.path.abspath(path).startswith(builtin_path)
+        ]
+
+    @mitogen.service.expose(policy=mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'module_name': mitogen.core.UnicodeType,
+        'module_path': mitogen.core.FsPathTypes,
+        'search_path': tuple,
+        'builtin_path': mitogen.core.FsPathTypes,
+        'context': mitogen.core.Context,
+    })
+    def scan(self, module_name, module_path, search_path, builtin_path, context):
+        key = (module_name, search_path)
+        if key not in self._cache:
+            resolved = ansible_mitogen.module_finder.scan(
+                module_name=module_name,
+                module_path=module_path,
+                search_path=tuple(search_path) + (builtin_path,),
+            )
+            builtin_path = os.path.abspath(builtin_path)
+            builtin = self._get_builtin_names(builtin_path, resolved)
+            custom = self._get_custom_tups(builtin_path, resolved)
+            self._cache[key] = {
+                'builtin': builtin,
+                'custom': custom,
+            }
+        return self._cache[key]
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/strategy.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..d82e61120b583916bf9753d36e97351ab334ed75
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/strategy.py
@@ -0,0 +1,373 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import distutils.version
+import os
+import signal
+import threading
+
+try:
+    import setproctitle
+except ImportError:
+    setproctitle = None
+
+import mitogen.core
+import ansible_mitogen.affinity
+import ansible_mitogen.loaders
+import ansible_mitogen.mixins
+import ansible_mitogen.process
+
+import ansible
+import ansible.executor.process.worker
+
+try:
+    # 2.8+ has a standardized "unset" object.
+    from ansible.utils.sentinel import Sentinel
+except ImportError:
+    Sentinel = None
+
+
+ANSIBLE_VERSION_MIN = (2, 3)
+ANSIBLE_VERSION_MAX = (2, 9)
+NEW_VERSION_MSG = (
+    "Your Ansible version (%s) is too recent. The most recent version\n"
+    "supported by Mitogen for Ansible is %s.x. Please check the Mitogen\n"
+    "release notes to see if a new version is available, otherwise\n"
+    "subscribe to the corresponding GitHub issue to be notified when\n"
+    "support becomes available.\n"
+    "\n"
+    "    https://mitogen.rtfd.io/en/latest/changelog.html\n"
+    "    https://github.com/dw/mitogen/issues/\n"
+)
+OLD_VERSION_MSG = (
+    "Your version of Ansible (%s) is too old. The oldest version supported by "
+    "Mitogen for Ansible is %s."
+)
+
+
+def _assert_supported_release():
+    """
+    Raise AnsibleError with a descriptive message if loaded into an
+    unsupported Ansible release.
+    """
+    v = ansible.__version__
+    if not isinstance(v, tuple):
+        v = tuple(distutils.version.LooseVersion(v).version)
+
+    if v[:2] < ANSIBLE_VERSION_MIN:
+        raise ansible.errors.AnsibleError(
+            OLD_VERSION_MSG % (v, ANSIBLE_VERSION_MIN)
+        )
+
+    if v[:2] > ANSIBLE_VERSION_MAX:
+        raise ansible.errors.AnsibleError(
+            NEW_VERSION_MSG % (ansible.__version__, ANSIBLE_VERSION_MAX)
+        )
+
+
+def _patch_awx_callback():
+    """
+    issue #400: AWX loads a display callback that suffers from thread-safety
+    issues. Detect the presence of older AWX versions and patch the bug.
+    """
+    # AWX uses sitecustomize.py to force-load this package. If it exists, we're
+    # running under AWX.
+    try:
+        from awx_display_callback.events import EventContext
+        from awx_display_callback.events import event_context
+    except ImportError:
+        return
+
+    if hasattr(EventContext(), '_local'):
+        # Patched version.
+        return
+
+    def patch_add_local(self, **kwargs):
+        tls = vars(self._local)
+        ctx = tls.setdefault('_ctx', {})
+        ctx.update(kwargs)
+
+    EventContext._local = threading.local()
+    EventContext.add_local = patch_add_local
+
+_patch_awx_callback()
+
+
+def wrap_action_loader__get(name, *args, **kwargs):
+    """
+    While the mitogen strategy is active, trap action_loader.get() calls,
+    augmenting any fetched class with ActionModuleMixin, which replaces various
+    helper methods inherited from ActionBase with implementations that avoid
+    the use of shell fragments wherever possible.
+
+    This is used instead of static subclassing as it generalizes to third party
+    action plugins outside the Ansible tree.
+    """
+    get_kwargs = {'class_only': True}
+    if name in ('fetch',):
+        name = 'mitogen_' + name
+    if ansible.__version__ >= '2.8':
+        get_kwargs['collection_list'] = kwargs.pop('collection_list', None)
+
+    klass = ansible_mitogen.loaders.action_loader__get(name, **get_kwargs)
+    if klass:
+        bases = (ansible_mitogen.mixins.ActionModuleMixin, klass)
+        adorned_klass = type(str(name), bases, {})
+        if kwargs.get('class_only'):
+            return adorned_klass
+        return adorned_klass(*args, **kwargs)
+
+
+REDIRECTED_CONNECTION_PLUGINS = (
+    'buildah',
+    'docker',
+    'kubectl',
+    'jail',
+    'local',
+    'lxc',
+    'lxd',
+    'machinectl',
+    'setns',
+    'ssh',
+)
+
+
+def wrap_connection_loader__get(name, *args, **kwargs):
+    """
+    While a Mitogen strategy is active, rewrite connection_loader.get() calls
+    for some transports into requests for a compatible Mitogen transport.
+    """
+    if name in REDIRECTED_CONNECTION_PLUGINS:
+        name = 'mitogen_' + name
+
+    return ansible_mitogen.loaders.connection_loader__get(name, *args, **kwargs)
+
+
+def wrap_worker__run(self):
+    """
+    While a Mitogen strategy is active, trap WorkerProcess.run() calls and use
+    the opportunity to set the worker's name in the process list and log
+    output, activate profiling if requested, and bind the worker to a specific
+    CPU.
+    """
+    if setproctitle:
+        setproctitle.setproctitle('worker:%s task:%s' % (
+            self._host.name,
+            self._task.action,
+        ))
+
+    # Ignore parent's attempts to murder us when we still need to write
+    # profiling output.
+    if mitogen.core._profile_hook.__name__ != '_profile_hook':
+        signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+    ansible_mitogen.logging.set_process_name('task')
+    ansible_mitogen.affinity.policy.assign_worker()
+    return mitogen.core._profile_hook('WorkerProcess',
+        lambda: worker__run(self)
+    )
+
+
+class AnsibleWrappers(object):
+    """
+    Manage add/removal of various Ansible runtime hooks.
+    """
+    def _add_plugin_paths(self):
+        """
+        Add the Mitogen plug-in directories to the ModuleLoader path, avoiding
+        the need for manual configuration.
+        """
+        base_dir = os.path.join(os.path.dirname(__file__), 'plugins')
+        ansible_mitogen.loaders.connection_loader.add_directory(
+            os.path.join(base_dir, 'connection')
+        )
+        ansible_mitogen.loaders.action_loader.add_directory(
+            os.path.join(base_dir, 'action')
+        )
+
+    def _install_wrappers(self):
+        """
+        Install our PluginLoader monkey patches and update global variables
+        with references to the real functions.
+        """
+        ansible_mitogen.loaders.action_loader.get = wrap_action_loader__get
+        ansible_mitogen.loaders.connection_loader.get = wrap_connection_loader__get
+
+        global worker__run
+        worker__run = ansible.executor.process.worker.WorkerProcess.run
+        ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run
+
+    def _remove_wrappers(self):
+        """
+        Uninstall the PluginLoader monkey patches.
+        """
+        ansible_mitogen.loaders.action_loader.get = (
+            ansible_mitogen.loaders.action_loader__get
+        )
+        ansible_mitogen.loaders.connection_loader.get = (
+            ansible_mitogen.loaders.connection_loader__get
+        )
+        ansible.executor.process.worker.WorkerProcess.run = worker__run
+
+    def install(self):
+        self._add_plugin_paths()
+        self._install_wrappers()
+
+    def remove(self):
+        self._remove_wrappers()
+
+
+class StrategyMixin(object):
+    """
+    This mix-in enhances any built-in strategy by arranging for an appropriate
+    WorkerModel instance to be constructed as necessary, or for the existing
+    one to be reused.
+
+    The WorkerModel in turn arranges for a connection multiplexer to be started
+    somewhere (by default in an external process), and for WorkerProcesses to
+    grow support for using those top-level services to communicate with remote
+    hosts.
+
+    Mitogen:
+
+        A private Broker IO multiplexer thread is created to dispatch IO
+        between the local Router and any connected streams, including streams
+        connected to Ansible WorkerProcesses, and SSH commands implementing
+        connections to remote machines.
+
+        A Router is created that implements message dispatch to any locally
+        registered handlers, and message routing for remote streams. Router is
+        the junction point through which WorkerProcesses and remote SSH contexts
+        can communicate.
+
+        Router additionally adds message handlers for a variety of base
+        services, review the Standard Handles section of the How It Works guide
+        in the documentation.
+
+        A ContextService is installed as a message handler in the connection
+        multiplexer subprocess and run on a private thread. It is responsible
+        for accepting requests to establish new SSH connections from worker
+        processes, and ensuring precisely one connection exists and is reused
+        for subsequent playbook steps. The service presently runs in a single
+        thread, so to begin with, new SSH connections are serialized.
+
+        Finally a mitogen.unix listener is created through which WorkerProcess
+        can establish a connection back into the connection multiplexer, in
+        order to avail of ContextService. A UNIX listener socket is necessary
+        as there is no more sane mechanism to arrange for IPC between the
+        Router in the connection multiplexer, and the corresponding Router in
+        the worker process.
+
+    Ansible:
+
+        PluginLoader monkey patches are installed to catch attempts to create
+        connection and action plug-ins.
+
+        For connection plug-ins, if the desired method is "local" or "ssh", it
+        is redirected to one of the "mitogen_*" connection plug-ins. That
+        plug-in implements communication via a UNIX socket connection to the
+        connection multiplexer process, and uses ContextService running there
+        to establish a persistent connection to the target.
+
+        For action plug-ins, the original class is looked up as usual, but a
+        new subclass is created dynamically in order to mix-in
+        ansible_mitogen.target.ActionModuleMixin, which overrides many of the
+        methods usually inherited from ActionBase in order to replace them with
+        pure-Python equivalents that avoid the use of shell.
+
+        In particular, _execute_module() is overridden with an implementation
+        that uses ansible_mitogen.target.run_module() executed in the target
+        Context. run_module() implements module execution by importing the
+        module as if it were a normal Python module, and capturing its output
+        in the remote process. Since the Mitogen module loader is active in the
+        remote process, all the heavy lifting of transferring the action module
+        and its dependencies are automatically handled by Mitogen.
+    """
+
+    def _queue_task(self, host, task, task_vars, play_context):
+        """
+        Many PluginLoader caches are defective as they are only populated in
+        the ephemeral WorkerProcess. Touch each plug-in path before forking to
+        ensure all workers receive a hot cache.
+        """
+        ansible_mitogen.loaders.module_loader.find_plugin(
+            name=task.action,
+            mod_type='',
+        )
+        ansible_mitogen.loaders.action_loader.get(
+            name=task.action,
+            class_only=True,
+        )
+        if play_context.connection is not Sentinel:
+            # 2.8 appears to defer computing this until inside the worker.
+            # TODO: figure out where it has moved.
+            ansible_mitogen.loaders.connection_loader.get(
+                name=play_context.connection,
+                class_only=True,
+            )
+
+        return super(StrategyMixin, self)._queue_task(
+            host=host,
+            task=task,
+            task_vars=task_vars,
+            play_context=play_context,
+        )
+
+    def _get_worker_model(self):
+        """
+        In classic mode a single :class:`WorkerModel` exists, which manages
+        references and configuration of the associated connection multiplexer
+        process.
+        """
+        return ansible_mitogen.process.get_classic_worker_model()
+
+    def run(self, iterator, play_context, result=0):
+        """
+        Wrap :meth:`run` to ensure requisite infrastructure and modifications
+        are configured for the duration of the call.
+        """
+        _assert_supported_release()
+        wrappers = AnsibleWrappers()
+        self._worker_model = self._get_worker_model()
+        ansible_mitogen.process.set_worker_model(self._worker_model)
+        try:
+            self._worker_model.on_strategy_start()
+            try:
+                wrappers.install()
+                try:
+                    run = super(StrategyMixin, self).run
+                    return mitogen.core._profile_hook('Strategy',
+                        lambda: run(iterator, play_context)
+                    )
+                finally:
+                    wrappers.remove()
+            finally:
+                self._worker_model.on_strategy_complete()
+        finally:
+            ansible_mitogen.process.set_worker_model(None)
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/target.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/target.py
new file mode 100644
index 0000000000000000000000000000000000000000..652b5adcc0850f914a88418de61260ad59343c68
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/target.py
@@ -0,0 +1,777 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Helper functions intended to be executed on the target. These are entrypoints
+for file transfer, module execution and sundry bits like changing file modes.
+"""
+
+import errno
+import grp
+import operator
+import os
+import pwd
+import re
+import signal
+import stat
+import subprocess
+import sys
+import tempfile
+import traceback
+import types
+
+# Absolute imports for <2.5.
+logging = __import__('logging')
+
+import mitogen.core
+import mitogen.fork
+import mitogen.parent
+import mitogen.service
+from mitogen.core import b
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    reduce
+except NameError:
+    # Python 3.x.
+    from functools import reduce
+
+try:
+    BaseException
+except NameError:
+    # Python 2.4
+    BaseException = Exception
+
+
+# Ansible since PR #41749 inserts "import __main__" into
+# ansible.module_utils.basic. Mitogen's importer will refuse such an import, so
+# we must setup a fake "__main__" before that module is ever imported. The
+# str() is to cast Unicode to bytes on Python 2.6.
+if not sys.modules.get(str('__main__')):
+    sys.modules[str('__main__')] = types.ModuleType(str('__main__'))
+
+import ansible.module_utils.json_utils
+import ansible_mitogen.runner
+
+
+LOG = logging.getLogger(__name__)
+
+MAKE_TEMP_FAILED_MSG = (
+    u"Unable to find a useable temporary directory. This likely means no\n"
+    u"system-supplied TMP directory can be written to, or all directories\n"
+    u"were mounted on 'noexec' filesystems.\n"
+    u"\n"
+    u"The following paths were tried:\n"
+    u"    %(paths)s\n"
+    u"\n"
+    u"Please check '-vvv' output for a log of individual path errors."
+)
+
+# Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up
+# interpreter state. So 2.4/2.5 interpreters start .local() contexts for
+# isolation instead. Since we don't have any crazy memory sharing problems to
+# avoid, there is no virginal fork parent either. The child is started directly
+# from the login/become process. In future this will be default everywhere,
+# fork is brainwrong from the stone age.
+FORK_SUPPORTED = sys.version_info >= (2, 6)
+
+#: Initialized to an econtext.parent.Context pointing at a pristine fork of
+#: the target Python interpreter before it executes any code or imports.
+_fork_parent = None
+
+#: Set by :func:`init_child` to the name of a writeable and executable
+#: temporary directory accessible by the active user account.
+good_temp_dir = None
+
+
def subprocess__Popen__close_fds(self, but):
    """
    issue #362, #435: subprocess.Popen(close_fds=True) aka.
    AnsibleModule.run_command() loops the entire FD space on Python<3.2.
    CentOS>5 ships with 1,048,576 FDs by default, resulting in huge (>500ms)
    latency starting children. Therefore replace Popen._close_fds on Linux with
    a version that is O(fds) rather than O(_SC_OPEN_MAX).

    :param int but:
        File descriptor to leave open, in addition to stdin/stdout/stderr
        (FDs 0-2), which are always skipped.
    """
    try:
        names = os.listdir(u'/proc/self/fd')
    except OSError:
        # May fail if acting on a container that does not have /proc mounted.
        self._original_close_fds(but)
        return

    for name in names:
        # /proc/self/fd can contain non-numeric entries; ignore them.
        if not name.isdigit():
            continue

        fd = int(name, 10)
        if fd > 2 and fd != but:
            try:
                os.close(fd)
            except OSError:
                # FD may already be closed (e.g. the listdir() handle itself).
                pass
+
+
# Install the fast close_fds replacement only where the slow path exists:
# on Linux (so /proc/self/fd is available), on Python 2.x (3.2+ fixed this
# upstream), when Popen still has a _close_fds attribute to wrap, and never
# in the master process.
if (
    sys.platform.startswith(u'linux') and
    sys.version < u'3.0' and
    hasattr(subprocess.Popen, u'_close_fds') and
    not mitogen.is_master
):
    subprocess.Popen._original_close_fds = subprocess.Popen._close_fds
    subprocess.Popen._close_fds = subprocess__Popen__close_fds
+
+
def get_small_file(context, path):
    """
    Basic in-memory caching module fetcher. This generates one roundtrip for
    every previously unseen file, so it is only a temporary solution.

    :param context:
        Context we should direct FileService requests to. For now (and probably
        forever) this is just the top-level Mitogen connection manager process.
    :param path:
        Path to fetch from FileService, must previously have been registered by
        a privileged context using the `register` command.
    :returns:
        Bytestring file data.
    """
    file_service = (
        mitogen.service
        .get_or_create_pool(router=context.router)
        .get_service(u'mitogen.service.PushFileService')
    )
    return file_service.get(path)
+
+
def transfer_file(context, in_path, out_path, sync=False, set_owner=False):
    """
    Streamily download a file from the connection multiplexer process in the
    controller.

    :param mitogen.core.Context context:
        Reference to the context hosting the FileService that will transmit the
        file.
    :param bytes in_path:
        FileService registered name of the input file.
    :param bytes out_path:
        Name of the output path on the local disk.
    :param bool sync:
        If :data:`True`, ensure the file content and metadata are fully on disk
        before renaming the temporary file over the existing file. This should
        ensure in the case of system crash, either the entire old or new file
        are visible post-reboot.
    :param bool set_owner:
        If :data:`True`, look up the metadata username and group on the local
        system and set the file owner using :func:`os.fchown`.
    """
    out_path = os.path.abspath(out_path)
    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
                                    prefix='.ansible_mitogen_transfer-',
                                    dir=os.path.dirname(out_path))
    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
    LOG.debug('transfer_file(%r) temporary file: %s', out_path, tmp_path)

    try:
        try:
            ok, metadata = mitogen.service.FileService.get(
                context=context,
                path=in_path,
                out_fp=fp,
            )
            if not ok:
                raise IOError('transfer of %r was interrupted.' % (in_path,))

            set_file_mode(tmp_path, metadata['mode'], fd=fp.fileno())
            if set_owner:
                set_file_owner(tmp_path, metadata['owner'], metadata['group'],
                               fd=fp.fileno())
            if sync:
                # Bug fix: flush and fsync while the descriptor is still
                # open. Previously fsync ran after fp.close(), raising
                # ValueError on the closed file object whenever sync=True.
                fp.flush()
                os.fsync(fp.fileno())
        finally:
            fp.close()

        os.rename(tmp_path, out_path)
    except BaseException:
        # Never leave a partially-written temporary file behind.
        os.unlink(tmp_path)
        raise

    os.utime(out_path, (metadata['atime'], metadata['mtime']))
+
+
def prune_tree(path):
    """
    Like shutil.rmtree(), but log errors rather than discard them, and do not
    waste multiple os.stat() calls discovering whether the object can be
    deleted, just try deleting it instead.

    :param str path:
        File or directory to delete; directories are removed recursively.
    """
    try:
        os.unlink(path)
        return
    except OSError:
        e = sys.exc_info()[1]
        # Fall through to directory handling only when unlink() failed
        # because `path` is a directory; log and bail for anything else.
        if not (os.path.isdir(path) and
                e.args[0] in (errno.EPERM, errno.EISDIR)):
            LOG.error('prune_tree(%r): %s', path, e)
            return

    try:
        # Ensure write access for readonly directories. Ignore error in case
        # path is on a weird filesystem (e.g. vfat).
        os.chmod(path, int('0700', 8))
    except OSError:
        e = sys.exc_info()[1]
        LOG.warning('prune_tree(%r): %s', path, e)

    try:
        # Recurse into children, then remove the now-empty directory.
        for name in os.listdir(path):
            if name not in ('.', '..'):
                prune_tree(os.path.join(path, name))
        os.rmdir(path)
    except OSError:
        e = sys.exc_info()[1]
        LOG.error('prune_tree(%r): %s', path, e)
+
+
def is_good_temp_dir(path):
    """
    Return :data:`True` if `path` can be used as a temporary directory, logging
    any failures that may cause it to be unsuitable. If the directory doesn't
    exist, we attempt to create it using :func:`os.makedirs`.

    :param str path:
        Candidate directory.
    :returns:
        :data:`True` if the directory exists (or was created mode 0700), a
        file created inside it can be chmodded, and that file is executable
        (i.e. the filesystem is not mounted noexec).
    """
    if not os.path.exists(path):
        try:
            os.makedirs(path, mode=int('0700', 8))
        except OSError:
            e = sys.exc_info()[1]
            LOG.debug('temp dir %r unusable: did not exist and attempting '
                      'to create it failed: %s', path, e)
            return False

    try:
        # The probe file also verifies the directory is writeable.
        tmp = tempfile.NamedTemporaryFile(
            prefix='ansible_mitogen_is_good_temp_dir',
            dir=path,
        )
    except (OSError, IOError):
        e = sys.exc_info()[1]
        LOG.debug('temp dir %r unusable: %s', path, e)
        return False

    try:
        try:
            os.chmod(tmp.name, int('0700', 8))
        except OSError:
            e = sys.exc_info()[1]
            LOG.debug('temp dir %r unusable: chmod failed: %s', path, e)
            return False

        try:
            # access(.., X_OK) is sufficient to detect noexec.
            if not os.access(tmp.name, os.X_OK):
                raise OSError('filesystem appears to be mounted noexec')
        except OSError:
            e = sys.exc_info()[1]
            LOG.debug('temp dir %r unusable: %s', path, e)
            return False
    finally:
        tmp.close()

    return True
+
+
def find_good_temp_dir(candidate_temp_dirs):
    """
    Given a list of candidate temp directories extracted from ``ansible.cfg``,
    combine it with the Python-builtin list of candidate directories used by
    :mod:`tempfile`, then iteratively try each until one is found that is both
    writeable and executable.

    :param list candidate_temp_dirs:
        List of candidate $variable-expanded and tilde-expanded directory paths
        that may be usable as a temporary directory.
    :returns:
        The first usable directory path.
    :raises IOError:
        No candidate was usable.
    """
    paths = []
    for candidate in candidate_temp_dirs:
        paths.append(os.path.expandvars(os.path.expanduser(candidate)))
    paths += tempfile._candidate_tempdir_list()

    for path in paths:
        if is_good_temp_dir(path):
            LOG.debug('Selected temp directory: %r (from %r)', path, paths)
            return path

    raise IOError(MAKE_TEMP_FAILED_MSG % {
        'paths': '\n    '.join(paths),
    })
+
+
@mitogen.core.takes_econtext
def init_child(econtext, log_level, candidate_temp_dirs):
    """
    Called by ContextService immediately after connection; arranges for the
    (presently) spotless Python interpreter to be forked, where the newly
    forked interpreter becomes the parent of any newly forked future
    interpreters.

    This is necessary to prevent modules that are executed in-process from
    polluting the global interpreter state in a way that affects explicitly
    isolated modules.

    :param int log_level:
        Logging package level active in the master.
    :param list[str] candidate_temp_dirs:
        List of $variable-expanded and tilde-expanded directory names to add to
        candidate list of temporary directories.

    :returns:
        Dict like::

            {
                'fork_context': mitogen.core.Context or None,
                'good_temp_dir': ...
                'home_dir': str
            }

        Where `fork_context` refers to the newly forked 'fork parent' context
        the controller will use to start forked jobs, and `home_dir` is the
        home directory for the active user account.
    """
    # Copying the master's log level causes log messages to be filtered before
    # they reach LogForwarder, thus reducing an influx of tiny messages waking
    # the connection multiplexer process in the master.
    LOG.setLevel(log_level)
    logging.getLogger('ansible_mitogen').setLevel(log_level)

    # issue #536: if the json module is available, remove simplejson from the
    # importer whitelist to avoid confusing certain Ansible modules.
    if json.__name__ == 'json':
        econtext.importer.whitelist.remove('simplejson')

    global _fork_parent
    if FORK_SUPPORTED:
        mitogen.parent.upgrade_router(econtext)
        # Fork while the interpreter is still pristine; see docstring above.
        _fork_parent = econtext.router.fork()

    global good_temp_dir
    good_temp_dir = find_good_temp_dir(candidate_temp_dirs)

    return {
        u'fork_context': _fork_parent,
        u'home_dir': mitogen.core.to_text(os.path.expanduser('~')),
        u'good_temp_dir': good_temp_dir,
    }
+
+
@mitogen.core.takes_econtext
def spawn_isolated_child(econtext):
    """
    For helper functions executed in the fork parent context, arrange for
    the context's router to be upgraded as necessary and for a new child to be
    prepared.

    The actual fork occurs from the 'virginal fork parent', which does not have
    any Ansible modules loaded prior to fork, to avoid conflicts resulting from
    custom module_utils paths.

    :returns:
        mitogen.core.Context of the new child.
    """
    mitogen.parent.upgrade_router(econtext)
    if FORK_SUPPORTED:
        context = econtext.router.fork()
    else:
        # Python 2.4/2.5: fork+threads is unsafe, start a local child instead.
        context = econtext.router.local()
    # NOTE(review): the log message uses this function's old name.
    LOG.debug('create_fork_child() -> %r', context)
    return context
+
+
def run_module(kwargs):
    """
    Set up the process environment in preparation for running an Ansible
    module. This monkey-patches the Ansible libraries in various places to
    prevent it from trying to kill the process on completion, and to prevent it
    from reading sys.stdin.
    """
    runner_klass = getattr(ansible_mitogen.runner, kwargs.pop('runner_name'))
    runner = runner_klass(**mitogen.core.Kwargs(kwargs))
    return runner.run()
+
+
+def _get_async_dir():
+    return os.path.expanduser(
+        os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
+    )
+
+
class AsyncRunner(object):
    """
    Target-side implementation of an Ansible asynchronous task: maintains the
    job status file under the async directory, optionally enforces a SIGALRM
    based time limit, runs the module, and records the parsed result.
    """

    def __init__(self, job_id, timeout_secs, started_sender, econtext, kwargs):
        # String job ID; also the status file's basename.
        self.job_id = job_id
        # If >0, maximum run time in seconds before the job is failed.
        self.timeout_secs = timeout_secs
        # mitogen Sender signalled once the initial status file is written.
        self.started_sender = started_sender
        self.econtext = econtext
        # Runner keyword arguments forwarded to run_module().
        self.kwargs = kwargs
        # Set by _on_sigalrm(); _run() skips result parsing after a timeout.
        self._timed_out = False
        self._init_path()

    def _init_path(self):
        # Compute the status file path, creating the async dir if missing.
        async_dir = _get_async_dir()
        if not os.path.exists(async_dir):
            os.makedirs(async_dir)
        self.path = os.path.join(async_dir, self.job_id)

    def _update(self, dct):
        """
        Update an async job status file.
        """
        LOG.info('%r._update(%r, %r)', self, self.job_id, dct)
        dct.setdefault('ansible_job_id', self.job_id)
        dct.setdefault('data', '')

        # Write-then-rename so readers never see a partially written file.
        fp = open(self.path + '.tmp', 'w')
        try:
            fp.write(json.dumps(dct))
        finally:
            fp.close()
        os.rename(self.path + '.tmp', self.path)

    def _on_sigalrm(self, signum, frame):
        """
        Respond to SIGALRM (job timeout) by updating the job file and killing
        the process.
        """
        msg = "Job reached maximum time limit of %d seconds." % (
            self.timeout_secs,
        )
        self._update({
            "failed": 1,
            "finished": 1,
            "msg": msg,
        })
        self._timed_out = True
        self.econtext.broker.shutdown()

    def _install_alarm(self):
        # Arrange for _on_sigalrm() to fire after timeout_secs.
        signal.signal(signal.SIGALRM, self._on_sigalrm)
        signal.alarm(self.timeout_secs)

    def _run_module(self):
        # Run detached, since this watcher process must outlive the task.
        kwargs = dict(self.kwargs, **{
            'detach': True,
            'econtext': self.econtext,
            'emulate_tty': False,
        })
        return run_module(kwargs)

    def _parse_result(self, dct):
        # Strip non-JSON noise around the module's stdout, then merge the
        # extracted warnings and stderr into the parsed result.
        filtered, warnings = (
            ansible.module_utils.json_utils.
            _filter_non_json_lines(dct['stdout'])
        )
        result = json.loads(filtered)
        result.setdefault('warnings', []).extend(warnings)
        result['stderr'] = dct['stderr'] or result.get('stderr', '')
        self._update(result)

    def _run(self):
        """
        1. Immediately updates the status file to mark the job as started.
        2. Installs a timer/signal handler to implement the time limit.
        3. Runs as with run_module(), writing the result to the status file.
        """
        self._update({
            'started': 1,
            'finished': 0,
            'pid': os.getpid()
        })
        self.started_sender.send(True)

        if self.timeout_secs > 0:
            self._install_alarm()

        dct = self._run_module()
        if not self._timed_out:
            # After SIGALRM fires, there is a window between broker responding
            # to shutdown() by killing the process, and work continuing on the
            # main thread. If main thread was asleep in at least
            # basic.py/select.select(), an EINTR will be raised. We want to
            # discard that exception.
            try:
                self._parse_result(dct)
            except Exception:
                self._update({
                    "failed": 1,
                    "msg": traceback.format_exc(),
                    "data": dct['stdout'],  # temporary notice only
                    "stderr": dct['stderr']
                })

    def run(self):
        """
        Entry point: run the job, recording any unexpected exception in the
        status file, and always shutting the broker down on completion.
        """
        try:
            try:
                self._run()
            except Exception:
                self._update({
                    "failed": 1,
                    "msg": traceback.format_exc(),
                })
        finally:
            self.econtext.broker.shutdown()
+
+
@mitogen.core.takes_econtext
def run_module_async(kwargs, job_id, timeout_secs, started_sender, econtext):
    """
    Execute a module with its run status and result written to a file,
    terminating the process on completion. This function must run in a child
    forked using :func:`create_fork_child`.

    @param mitogen.core.Sender started_sender:
        A sender that will receive :data:`True` once the job has reached a
        point where its initial job file has been written. This is required to
        avoid a race where an overly eager controller can check for a task
        before it has reached that point in execution, which is possible at
        least on Python 2.4, where forking is not available for async tasks.
    """
    AsyncRunner(job_id, timeout_secs, started_sender, econtext, kwargs).run()
+
+
def get_user_shell():
    """
    For commands executed directly via an SSH command-line, SSH looks up the
    user's shell via getpwuid() and only defaults to /bin/sh if that field is
    missing or empty.
    """
    try:
        shell = pwd.getpwuid(os.geteuid()).pw_shell
    except KeyError:
        # No passwd entry exists for the current effective UID.
        shell = None

    if shell:
        return shell
    return '/bin/sh'
+
+
def exec_args(args, in_data='', chdir=None, shell=None, emulate_tty=False):
    """
    Run a command in a subprocess, emulating the argument handling behaviour of
    SSH.

    :param list[str] args:
        Argument vector.
    :param bytes in_data:
        Optional standard input for the command.
    :param str chdir:
        Optional working directory for the child process.
    :param shell:
        Unused; accepted for signature compatibility with exec_command().
    :param bool emulate_tty:
        If :data:`True`, arrange for stdout and stderr to be merged into the
        stdout pipe and for LF to be translated into CRLF, emulating the
        behaviour of a TTY.
    :return:
        (return code, stdout bytes, stderr bytes)
    """
    LOG.debug('exec_args(%r, ..., chdir=%r)', args, chdir)
    assert isinstance(args, list)

    if emulate_tty:
        stderr = subprocess.STDOUT
    else:
        stderr = subprocess.PIPE

    proc = subprocess.Popen(
        args=args,
        stdout=subprocess.PIPE,
        stderr=stderr,
        stdin=subprocess.PIPE,
        cwd=chdir,
    )
    stdout, stderr = proc.communicate(in_data)

    if emulate_tty:
        stdout = stdout.replace(b('\n'), b('\r\n'))
    # communicate() yields stderr=None when merged into stdout; normalize
    # it to empty bytes.
    return proc.returncode, stdout, stderr or b('')
+
+
def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False):
    """
    Run a command in a subprocess, emulating the argument handling behaviour of
    SSH.

    :param bytes cmd:
        String command line, passed to user's shell.
    :param bytes in_data:
        Optional standard input for the command.
    :return:
        (return code, stdout bytes, stderr bytes)
    """
    assert isinstance(cmd, mitogen.core.UnicodeType)
    # Hand the command line to the user's login shell, as SSH would.
    shell_argv = [get_user_shell(), '-c', cmd]
    return exec_args(shell_argv, in_data, chdir, shell, emulate_tty)
+
+
def read_path(path):
    """
    Fetch the contents of a filesystem `path` as bytes.

    :param str path:
        File to read.
    :returns:
        File content as bytes.
    """
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        # Close explicitly rather than leaking the descriptor until garbage
        # collection. try/finally (rather than `with`) preserves the file's
        # stated Python 2.4 compatibility.
        fp.close()
+
+
def set_file_owner(path, owner, group=None, fd=None):
    """
    Set the owner and group of a file, defaulting each to the current
    effective UID/GID when the corresponding name is not supplied.

    :param str path:
        Path to modify; used when `fd` is unavailable.
    :param str owner:
        Username, or a falsy value to use the current effective UID.
    :param str group:
        Group name, or a falsy value to use the current effective GID.
    :param int fd:
        Optional open descriptor for `path`; preferred when :func:`os.fchown`
        exists.
    :raises KeyError:
        `owner` or `group` does not exist.
    """
    if owner:
        uid = pwd.getpwnam(owner).pw_uid
    else:
        uid = os.geteuid()

    if group:
        gid = grp.getgrnam(group).gr_gid
    else:
        gid = os.getegid()

    # Bug fix: os.fchown()/os.chown() take uid and gid as two separate
    # arguments; the previous code passed a (uid, gid) tuple, raising
    # TypeError on every call.
    if fd is not None and hasattr(os, 'fchown'):
        os.fchown(fd, uid, gid)
    else:
        # Python<2.6 lacks fchown().
        os.chown(path, uid, gid)
+
+
def write_path(path, s, owner=None, group=None, mode=None,
               utimes=None, sync=False):
    """
    Writes bytes `s` to a filesystem `path`, atomically via a temporary file
    renamed into place.

    :param str path:
        Destination path.
    :param bytes s:
        Data to write.
    :param str owner:
        Optional owner username; applied to the temporary file before rename.
    :param str group:
        Optional group name.
    :param mode:
        Optional numeric or chmod(1)-style symbolic mode.
    :param tuple utimes:
        Optional (atime, mtime) applied to the final path.
    :param bool sync:
        If :data:`True`, fsync the data before renaming into place.
    """
    path = os.path.abspath(path)
    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
                                    prefix='.ansible_mitogen_transfer-',
                                    dir=os.path.dirname(path))
    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
    LOG.debug('write_path(path=%r) temporary file: %s', path, tmp_path)

    try:
        try:
            if mode:
                set_file_mode(tmp_path, mode, fd=fp.fileno())
            if owner or group:
                set_file_owner(tmp_path, owner, group, fd=fp.fileno())
            fp.write(s)
            if sync:
                # Bug fix: flush and fsync while the descriptor is still
                # open. Previously fsync ran after fp.close(), raising
                # ValueError on the closed file object whenever sync=True.
                fp.flush()
                os.fsync(fp.fileno())
        finally:
            fp.close()

        os.rename(tmp_path, path)
    except BaseException:
        # Never leave a partially-written temporary file behind.
        os.unlink(tmp_path)
        raise

    if utimes:
        os.utime(path, utimes)
+
+
# Matches one clause of a chmod(1)-style symbolic spec: optional "who"
# classes ([ugoa]*), one operator (+ - =), then either a copy source ([ugo])
# or permission letters ([rwx]*).
CHMOD_CLAUSE_PAT = re.compile(r'([uoga]*)([+\-=])([ugo]|[rwx]*)')
# Full permission-bit mask covering each "who" class.
CHMOD_MASKS = {
    'u': stat.S_IRWXU,
    'g': stat.S_IRWXG,
    'o': stat.S_IRWXO,
    'a': (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO),
}
# Individual r/w/x bits for each "who" class.
CHMOD_BITS = {
    'u': {'r': stat.S_IRUSR, 'w': stat.S_IWUSR, 'x': stat.S_IXUSR},
    'g': {'r': stat.S_IRGRP, 'w': stat.S_IWGRP, 'x': stat.S_IXGRP},
    'o': {'r': stat.S_IROTH, 'w': stat.S_IWOTH, 'x': stat.S_IXOTH},
    'a': {
        'r': (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH),
        'w': (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH),
        'x': (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    }
}
+
+
def apply_mode_spec(spec, mode):
    """
    Given a symbolic file mode change specification in the style of chmod(1)
    `spec`, apply changes in the specification to the numeric file mode `mode`.

    :param spec:
        Symbolic specification, e.g. ``u+rwx,go-w``.
    :param int mode:
        Existing numeric mode to transform.
    :returns:
        New numeric mode.
    """
    for clause in mitogen.core.to_text(spec).split(','):
        match = CHMOD_CLAUSE_PAT.match(clause)
        # NOTE(review): a malformed clause yields match=None (AttributeError
        # below), and copy-forms such as "g=u" hit a KeyError in CHMOD_BITS;
        # presumably callers pre-validate the spec -- confirm.
        who, op, perms = match.groups()
        for ch in who or 'a':
            mask = CHMOD_MASKS[ch]
            bits = CHMOD_BITS[ch]
            cur_perm_bits = mode & mask
            new_perm_bits = reduce(operator.or_, (bits[p] for p in perms), 0)
            # Clear this class's bits, then recombine per the operator.
            mode &= ~mask
            if op == '=':
                mode |= new_perm_bits
            elif op == '+':
                mode |= new_perm_bits | cur_perm_bits
            else:
                mode |= cur_perm_bits & ~new_perm_bits
    return mode
+
+
def set_file_mode(path, spec, fd=None):
    """
    Update the permissions of a file using the same syntax as chmod(1).

    :param str path:
        Path to modify; used when `fd` is unavailable.
    :param spec:
        Either a numeric mode (int, or long on Python 2), a string of octal
        digits, or a chmod(1)-style symbolic specification.
    :param int fd:
        Optional open descriptor for `path`; preferred when :func:`os.fchmod`
        exists.
    """
    if isinstance(spec, int):
        new_mode = spec
    elif not mitogen.core.PY3 and isinstance(spec, long):
        new_mode = spec
    elif spec.isdigit():
        # Digit strings are interpreted as octal, as with chmod(1).
        new_mode = int(spec, 8)
    else:
        mode = os.stat(path).st_mode
        new_mode = apply_mode_spec(spec, mode)

    if fd is not None and hasattr(os, 'fchmod'):
        os.fchmod(fd, new_mode)
    else:
        # Python<2.6 lacks fchmod().
        os.chmod(path, new_mode)
+
+
def file_exists(path):
    """
    Return :data:`True` if `path` exists. This is a wrapper function over
    :func:`os.path.exists`, since its implementation module varies across
    Python versions.
    """
    if os.path.exists(path):
        return True
    return False
diff --git a/deployment/lib/mitogen-0.2.9/ansible_mitogen/transport_config.py b/deployment/lib/mitogen-0.2.9/ansible_mitogen/transport_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa4a16d0172848930e006bd4c612a0817cbbba79
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/ansible_mitogen/transport_config.py
@@ -0,0 +1,699 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+"""
+Mitogen extends Ansible's target configuration mechanism in several ways that
+require some care:
+
+* Per-task configurables in Ansible like ansible_python_interpreter are
+  connection-layer configurables in Mitogen. They must be extracted during each
+  task execution to form the complete connection-layer configuration.
+
+* Mitogen has extra configurables not supported by Ansible at all, such as
+  mitogen_ssh_debug_level. These are extracted the same way as
+  ansible_python_interpreter.
+
+* Mitogen allows connections to be delegated to other machines. Ansible has no
+  internal framework for this, and so Mitogen must figure out a delegated
+  connection configuration all on its own. It cannot reuse much of the Ansible
+  machinery for building a connection configuration, as that machinery is
+  deeply spread out and hard-wired to expect Ansible's usual mode of operation.
+
+For normal and delegate_to connections, Ansible's PlayContext is reused where
+possible to maximize compatibility, but for proxy hops, configurations are
+built up using the HostVars magic class to call VariableManager.get_vars()
+behind the scenes on our behalf. Where Ansible has multiple sources of a
+configuration item, for example, ansible_ssh_extra_args, Mitogen must (ideally
+perfectly) reproduce how Ansible arrives at its value, without using mechanisms
+that are hard-wired or change across Ansible versions.
+
+That is what this file is for. It exports two spec classes, one that takes all
+information from PlayContext, and another that takes (almost) all information
+from HostVars.
+"""
+
+import abc
+import os
+import ansible.utils.shlex
+import ansible.constants as C
+
+from ansible.module_utils.six import with_metaclass
+
+
+import mitogen.core
+
+
+def parse_python_path(s):
+    """
+    Given the string set for ansible_python_interpreter, parse it using shell
+    syntax and return an appropriate argument vector.
+
+    :param str s:
+        Interpreter setting, possibly containing arguments.
+    :returns:
+        List of argv elements, or :data:`None` when `s` is empty or None.
+    """
+    if s:
+        return ansible.utils.shlex.shlex_split(s)
+
+
+def optional_secret(value):
+    """
+    Wrap `value` in :class:`mitogen.core.Secret` if it is not :data:`None`,
+    otherwise return :data:`None`.
+
+    :param value:
+        Plaintext value, or :data:`None` when no secret is configured.
+    :returns:
+        :class:`mitogen.core.Secret` wrapping `value`, or :data:`None`.
+    """
+    if value is not None:
+        return mitogen.core.Secret(value)
+
+
+def first_true(it, default=None):
+    """
+    Return the first truthy element from `it`.
+
+    :param it:
+        Iterable to scan, in order.
+    :param default:
+        Value returned when no element of `it` is truthy.
+    """
+    for elem in it:
+        if elem:
+            return elem
+    return default
+
+
+class Spec(with_metaclass(abc.ABCMeta, object)):
+    """
+    A source for variables that comprise a connection configuration.
+
+    Concrete getters may return :data:`None` when no value is configured for
+    the corresponding setting.
+    """
+
+    @abc.abstractmethod
+    def transport(self):
+        """
+        The name of the Ansible plug-in implementing the connection.
+        """
+
+    @abc.abstractmethod
+    def inventory_name(self):
+        """
+        The name of the target being connected to as it appears in Ansible's
+        inventory.
+        """
+
+    @abc.abstractmethod
+    def remote_addr(self):
+        """
+        The network address of the target, or for container and other special
+        targets, some other unique identifier.
+        """
+
+    @abc.abstractmethod
+    def remote_user(self):
+        """
+        The username of the login account on the target.
+        """
+
+    @abc.abstractmethod
+    def password(self):
+        """
+        The password of the login account on the target.
+        """
+
+    @abc.abstractmethod
+    def become(self):
+        """
+        :data:`True` if privilege escalation should be active.
+        """
+
+    @abc.abstractmethod
+    def become_method(self):
+        """
+        The name of the Ansible become method to use.
+        """
+
+    @abc.abstractmethod
+    def become_user(self):
+        """
+        The username of the target account for become.
+        """
+
+    @abc.abstractmethod
+    def become_pass(self):
+        """
+        The password of the target account for become.
+        """
+
+    @abc.abstractmethod
+    def port(self):
+        """
+        The port of the login service on the target machine.
+        """
+
+    @abc.abstractmethod
+    def python_path(self):
+        """
+        Path to the Python interpreter on the target machine.
+        """
+
+    @abc.abstractmethod
+    def private_key_file(self):
+        """
+        Path to the SSH private key file to use to login.
+        """
+
+    @abc.abstractmethod
+    def ssh_executable(self):
+        """
+        Path to the SSH executable.
+        """
+
+    @abc.abstractmethod
+    def timeout(self):
+        """
+        The generic timeout for all connections.
+        """
+
+    @abc.abstractmethod
+    def ansible_ssh_timeout(self):
+        """
+        The SSH-specific timeout for a connection.
+        """
+
+    @abc.abstractmethod
+    def ssh_args(self):
+        """
+        The list of additional arguments that should be included in an SSH
+        invocation.
+        """
+
+    @abc.abstractmethod
+    def become_exe(self):
+        """
+        The path to the executable implementing the become method on the remote
+        machine.
+        """
+
+    @abc.abstractmethod
+    def sudo_args(self):
+        """
+        The list of additional arguments that should be included in a become
+        invocation.
+        """
+        # TODO: split out into sudo_args/become_args.
+
+    @abc.abstractmethod
+    def mitogen_via(self):
+        """
+        The value of the mitogen_via= variable for this connection. Indicates
+        the connection should be established via an intermediary.
+        """
+
+    @abc.abstractmethod
+    def mitogen_kind(self):
+        """
+        The type of container to use with the "setns" transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_mask_remote_name(self):
+        """
+        Specifies whether to set a fixed "remote_name" field. The remote_name
+        is the suffix of `argv[0]` for remote interpreters. By default it
+        includes identifying information from the local process, which may be
+        undesirable in some circumstances.
+        """
+
+    @abc.abstractmethod
+    def mitogen_buildah_path(self):
+        """
+        The path to the "buildah" program for the 'buildah' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_docker_path(self):
+        """
+        The path to the "docker" program for the 'docker' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_kubectl_path(self):
+        """
+        The path to the "kubectl" program for the 'kubectl' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_lxc_path(self):
+        """
+        The path to the "lxc" program for the 'lxd' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_lxc_attach_path(self):
+        """
+        The path to the "lxc-attach" program for the 'lxc' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_lxc_info_path(self):
+        """
+        The path to the "lxc-info" program for the 'lxc' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_machinectl_path(self):
+        """
+        The path to the "machinectl" program for the 'setns' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_ssh_keepalive_interval(self):
+        """
+        The SSH ServerAliveInterval.
+        """
+
+    @abc.abstractmethod
+    def mitogen_ssh_keepalive_count(self):
+        """
+        The SSH ServerAliveCount.
+        """
+
+    @abc.abstractmethod
+    def mitogen_ssh_debug_level(self):
+        """
+        The SSH debug level.
+        """
+
+    @abc.abstractmethod
+    def mitogen_ssh_compression(self):
+        """
+        Whether SSH compression is enabled.
+        """
+
+    @abc.abstractmethod
+    def extra_args(self):
+        """
+        Connection-specific arguments.
+        """
+
+    @abc.abstractmethod
+    def ansible_doas_exe(self):
+        """
+        Value of "ansible_doas_exe" variable.
+        """
+
+
+class PlayContextSpec(Spec):
+    """
+    PlayContextSpec takes almost all its information as-is from Ansible's
+    PlayContext. It is used for normal connections and delegate_to connections,
+    and should always be accurate.
+    """
+    def __init__(self, connection, play_context, transport, inventory_name):
+        # :param connection: the active Connection plug-in; consulted for
+        #     per-task variables and connection-specific extra args.
+        # :param play_context: Ansible PlayContext describing the target.
+        # :param transport: transport name selected for this connection.
+        # :param inventory_name: inventory hostname of the target.
+        self._connection = connection
+        self._play_context = play_context
+        self._transport = transport
+        self._inventory_name = inventory_name
+
+    def transport(self):
+        return self._transport
+
+    def inventory_name(self):
+        return self._inventory_name
+
+    def remote_addr(self):
+        return self._play_context.remote_addr
+
+    def remote_user(self):
+        return self._play_context.remote_user
+
+    def become(self):
+        return self._play_context.become
+
+    def become_method(self):
+        return self._play_context.become_method
+
+    def become_user(self):
+        return self._play_context.become_user
+
+    def become_pass(self):
+        return optional_secret(self._play_context.become_pass)
+
+    def password(self):
+        return optional_secret(self._play_context.password)
+
+    def port(self):
+        return self._play_context.port
+
+    def python_path(self):
+        s = self._connection.get_task_var('ansible_python_interpreter')
+        # #511, #536: executor/module_common.py::_get_shebang() hard-wires
+        # "/usr/bin/python" as the default interpreter path if no other
+        # interpreter is specified.
+        return parse_python_path(s or '/usr/bin/python')
+
+    def private_key_file(self):
+        return self._play_context.private_key_file
+
+    def ssh_executable(self):
+        return self._play_context.ssh_executable
+
+    def timeout(self):
+        return self._play_context.timeout
+
+    def ansible_ssh_timeout(self):
+        # NOTE(review): ansible_timeout is consulted before
+        # ansible_ssh_timeout; confirm this matches Ansible's own precedence.
+        return (
+            self._connection.get_task_var('ansible_timeout') or
+            self._connection.get_task_var('ansible_ssh_timeout') or
+            self.timeout()
+        )
+
+    def ssh_args(self):
+        # Concatenate ssh_args, ssh_common_args and ssh_extra_args in this
+        # fixed order, shell-splitting each; getattr() guards older Ansibles
+        # where some of these attributes do not exist.
+        return [
+            mitogen.core.to_text(term)
+            for s in (
+                getattr(self._play_context, 'ssh_args', ''),
+                getattr(self._play_context, 'ssh_common_args', ''),
+                getattr(self._play_context, 'ssh_extra_args', '')
+            )
+            for term in ansible.utils.shlex.shlex_split(s or '')
+        ]
+
+    def become_exe(self):
+        # In Ansible 2.8, PlayContext.become_exe always has a default value due
+        # to the new options mechanism. Previously it was only set if a value
+        # ("somewhere") had been specified for the task.
+        # For consistency in the tests, here we make older Ansibles behave like
+        # newer Ansibles.
+        exe = self._play_context.become_exe
+        if exe is None and self._play_context.become_method == 'sudo':
+            exe = 'sudo'
+        return exe
+
+    def sudo_args(self):
+        # Use the first non-empty flags source, newest mechanism first.
+        return [
+            mitogen.core.to_text(term)
+            for term in ansible.utils.shlex.shlex_split(
+                first_true((
+                    self._play_context.become_flags,
+                    # Ansible <=2.7.
+                    getattr(self._play_context, 'sudo_flags', ''),
+                    # Ansible <=2.3.
+                    getattr(C, 'DEFAULT_BECOME_FLAGS', ''),
+                    getattr(C, 'DEFAULT_SUDO_FLAGS', '')
+                ), default='')
+            )
+        ]
+
+    def mitogen_via(self):
+        return self._connection.get_task_var('mitogen_via')
+
+    def mitogen_kind(self):
+        return self._connection.get_task_var('mitogen_kind')
+
+    def mitogen_mask_remote_name(self):
+        return self._connection.get_task_var('mitogen_mask_remote_name')
+
+    def mitogen_buildah_path(self):
+        return self._connection.get_task_var('mitogen_buildah_path')
+
+    def mitogen_docker_path(self):
+        return self._connection.get_task_var('mitogen_docker_path')
+
+    def mitogen_kubectl_path(self):
+        return self._connection.get_task_var('mitogen_kubectl_path')
+
+    def mitogen_lxc_path(self):
+        return self._connection.get_task_var('mitogen_lxc_path')
+
+    def mitogen_lxc_attach_path(self):
+        return self._connection.get_task_var('mitogen_lxc_attach_path')
+
+    def mitogen_lxc_info_path(self):
+        return self._connection.get_task_var('mitogen_lxc_info_path')
+
+    def mitogen_ssh_keepalive_interval(self):
+        return self._connection.get_task_var('mitogen_ssh_keepalive_interval')
+
+    def mitogen_ssh_keepalive_count(self):
+        return self._connection.get_task_var('mitogen_ssh_keepalive_count')
+
+    def mitogen_machinectl_path(self):
+        return self._connection.get_task_var('mitogen_machinectl_path')
+
+    def mitogen_ssh_debug_level(self):
+        return self._connection.get_task_var('mitogen_ssh_debug_level')
+
+    def mitogen_ssh_compression(self):
+        return self._connection.get_task_var('mitogen_ssh_compression')
+
+    def extra_args(self):
+        return self._connection.get_extra_args()
+
+    def ansible_doas_exe(self):
+        return (
+            self._connection.get_task_var('ansible_doas_exe') or
+            os.environ.get('ANSIBLE_DOAS_EXE')
+        )
+
+
+class MitogenViaSpec(Spec):
+    """
+    MitogenViaSpec takes most of its information from the HostVars of the
+    running task. HostVars is a lightweight wrapper around VariableManager, so
+    it is better to say that VariableManager.get_vars() is the ultimate source
+    of MitogenViaSpec's information.
+
+    Due to this, mitogen_via= hosts must have all their configuration
+    information represented as host and group variables. We cannot use any
+    per-task configuration, as all that data belongs to the real target host.
+
+    Ansible uses all kinds of strange historical logic for calculating
+    variables, including making their precedence configurable. MitogenViaSpec
+    must ultimately reimplement all of that logic. It is likely that if you are
+    having a configruation problem with connection delegation, the answer to
+    your problem lies in the method implementations below!
+    """
+    def __init__(self, inventory_name, host_vars, become_method, become_user,
+                 play_context):
+        """
+        :param str inventory_name:
+            The inventory name of the intermediary machine, i.e. not the target
+            machine.
+        :param dict host_vars:
+            The HostVars magic dictionary provided by Ansible in task_vars.
+        :param str become_method:
+            If the mitogen_via= spec included a become method, the method it
+            specifies.
+        :param str become_user:
+            If the mitogen_via= spec included a become user, the user it
+            specifies.
+        :param PlayContext play_context:
+            For some global values **only**, the PlayContext used to describe
+            the real target machine. Values from this object are **strictly
+            restricted** to values that are Ansible-global, e.g. the passwords
+            specified interactively.
+        """
+        self._inventory_name = inventory_name
+        self._host_vars = host_vars
+        self._become_method = become_method
+        self._become_user = become_user
+        # Dangerous! You may find a variable you want in this object, but it's
+        # almost certainly for the wrong machine!
+        self._dangerous_play_context = play_context
+
+    def transport(self):
+        return (
+            self._host_vars.get('ansible_connection') or
+            C.DEFAULT_TRANSPORT
+        )
+
+    def inventory_name(self):
+        return self._inventory_name
+
+    def remote_addr(self):
+        # play_context.py::MAGIC_VARIABLE_MAPPING
+        return (
+            self._host_vars.get('ansible_ssh_host') or
+            self._host_vars.get('ansible_host') or
+            self._inventory_name
+        )
+
+    def remote_user(self):
+        return (
+            self._host_vars.get('ansible_ssh_user') or
+            self._host_vars.get('ansible_user') or
+            C.DEFAULT_REMOTE_USER
+        )
+
+    def become(self):
+        return bool(self._become_user)
+
+    def become_method(self):
+        return (
+            self._become_method or
+            self._host_vars.get('ansible_become_method') or
+            C.DEFAULT_BECOME_METHOD
+        )
+
+    def become_user(self):
+        return self._become_user
+
+    def become_pass(self):
+        return optional_secret(
+            self._host_vars.get('ansible_become_password') or
+            self._host_vars.get('ansible_become_pass')
+        )
+
+    def password(self):
+        return optional_secret(
+            self._host_vars.get('ansible_ssh_pass') or
+            self._host_vars.get('ansible_password')
+        )
+
+    def port(self):
+        return (
+            self._host_vars.get('ansible_ssh_port') or
+            self._host_vars.get('ansible_port') or
+            C.DEFAULT_REMOTE_PORT
+        )
+
+    def python_path(self):
+        s = self._host_vars.get('ansible_python_interpreter')
+        # #511, #536: executor/module_common.py::_get_shebang() hard-wires
+        # "/usr/bin/python" as the default interpreter path if no other
+        # interpreter is specified.
+        return parse_python_path(s or '/usr/bin/python')
+
+    def private_key_file(self):
+        # TODO: must come from PlayContext too.
+        return (
+            self._host_vars.get('ansible_ssh_private_key_file') or
+            self._host_vars.get('ansible_private_key_file') or
+            C.DEFAULT_PRIVATE_KEY_FILE
+        )
+
+    def ssh_executable(self):
+        return (
+            self._host_vars.get('ansible_ssh_executable') or
+            C.ANSIBLE_SSH_EXECUTABLE
+        )
+
+    def timeout(self):
+        # TODO: must come from PlayContext too.
+        return C.DEFAULT_TIMEOUT
+
+    def ansible_ssh_timeout(self):
+        return (
+            self._host_vars.get('ansible_timeout') or
+            self._host_vars.get('ansible_ssh_timeout') or
+            self.timeout()
+        )
+
+    def ssh_args(self):
+        return [
+            mitogen.core.to_text(term)
+            for s in (
+                (
+                    self._host_vars.get('ansible_ssh_args') or
+                    getattr(C, 'ANSIBLE_SSH_ARGS', None) or
+                    os.environ.get('ANSIBLE_SSH_ARGS')
+                    # TODO: ini entry. older versions.
+                ),
+                (
+                    self._host_vars.get('ansible_ssh_common_args') or
+                    os.environ.get('ANSIBLE_SSH_COMMON_ARGS')
+                    # TODO: ini entry.
+                ),
+                (
+                    self._host_vars.get('ansible_ssh_extra_args') or
+                    os.environ.get('ANSIBLE_SSH_EXTRA_ARGS')
+                    # TODO: ini entry.
+                ),
+            )
+            for term in ansible.utils.shlex.shlex_split(s)
+            if s
+        ]
+
+    def become_exe(self):
+        return (
+            self._host_vars.get('ansible_become_exe') or
+            C.DEFAULT_BECOME_EXE
+        )
+
+    def sudo_args(self):
+        return [
+            mitogen.core.to_text(term)
+            for s in (
+                self._host_vars.get('ansible_sudo_flags') or '',
+                self._host_vars.get('ansible_become_flags') or '',
+            )
+            for term in ansible.utils.shlex.shlex_split(s)
+        ]
+
+    def mitogen_via(self):
+        return self._host_vars.get('mitogen_via')
+
+    def mitogen_kind(self):
+        return self._host_vars.get('mitogen_kind')
+
+    def mitogen_mask_remote_name(self):
+        return self._host_vars.get('mitogen_mask_remote_name')
+
+    def mitogen_buildah_path(self):
+        return self._host_vars.get('mitogen_buildah_path')
+
+    def mitogen_docker_path(self):
+        return self._host_vars.get('mitogen_docker_path')
+
+    def mitogen_kubectl_path(self):
+        return self._host_vars.get('mitogen_kubectl_path')
+
+    def mitogen_lxc_path(self):
+        return self.host_vars.get('mitogen_lxc_path')
+
+    def mitogen_lxc_attach_path(self):
+        return self._host_vars.get('mitogen_lxc_attach_path')
+
+    def mitogen_lxc_info_path(self):
+        return self._host_vars.get('mitogen_lxc_info_path')
+
+    def mitogen_ssh_keepalive_interval(self):
+        return self._host_vars.get('mitogen_ssh_keepalive_interval')
+
+    def mitogen_ssh_keepalive_count(self):
+        return self._host_vars.get('mitogen_ssh_keepalive_count')
+
+    def mitogen_machinectl_path(self):
+        return self._host_vars.get('mitogen_machinectl_path')
+
+    def mitogen_ssh_debug_level(self):
+        return self._host_vars.get('mitogen_ssh_debug_level')
+
+    def mitogen_ssh_compression(self):
+        return self._host_vars.get('mitogen_ssh_compression')
+
+    def extra_args(self):
+        return []  # TODO
+
+    def ansible_doas_exe(self):
+        return (
+            self._host_vars.get('ansible_doas_exe') or
+            os.environ.get('ANSIBLE_DOAS_EXE')
+        )
diff --git a/deployment/lib/mitogen-0.2.9/mitogen.egg-info/PKG-INFO b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..3346b33426457f1c755a46348d38edaa5333d687
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/PKG-INFO
@@ -0,0 +1,23 @@
+Metadata-Version: 1.1
+Name: mitogen
+Version: 0.2.9
+Summary: Library for writing distributed self-replicating programs.
+Home-page: https://github.com/dw/mitogen/
+Author: David Wilson
+Author-email: UNKNOWN
+License: New BSD
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Environment :: Console
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: System :: Distributed Computing
+Classifier: Topic :: System :: Systems Administration
diff --git a/deployment/lib/mitogen-0.2.9/mitogen.egg-info/SOURCES.txt b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/SOURCES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..afaccd9dc43b00dd1c54e166aca38fa2cde5b5d5
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/SOURCES.txt
@@ -0,0 +1,81 @@
+LICENSE
+MANIFEST.in
+README.md
+setup.cfg
+setup.py
+ansible_mitogen/__init__.py
+ansible_mitogen/affinity.py
+ansible_mitogen/connection.py
+ansible_mitogen/loaders.py
+ansible_mitogen/logging.py
+ansible_mitogen/mixins.py
+ansible_mitogen/module_finder.py
+ansible_mitogen/parsing.py
+ansible_mitogen/planner.py
+ansible_mitogen/process.py
+ansible_mitogen/runner.py
+ansible_mitogen/services.py
+ansible_mitogen/strategy.py
+ansible_mitogen/target.py
+ansible_mitogen/transport_config.py
+ansible_mitogen/compat/__init__.py
+ansible_mitogen/compat/simplejson/__init__.py
+ansible_mitogen/compat/simplejson/decoder.py
+ansible_mitogen/compat/simplejson/encoder.py
+ansible_mitogen/compat/simplejson/scanner.py
+ansible_mitogen/plugins/__init__.py
+ansible_mitogen/plugins/action/__init__.py
+ansible_mitogen/plugins/action/mitogen_fetch.py
+ansible_mitogen/plugins/action/mitogen_get_stack.py
+ansible_mitogen/plugins/connection/__init__.py
+ansible_mitogen/plugins/connection/mitogen_buildah.py
+ansible_mitogen/plugins/connection/mitogen_doas.py
+ansible_mitogen/plugins/connection/mitogen_docker.py
+ansible_mitogen/plugins/connection/mitogen_jail.py
+ansible_mitogen/plugins/connection/mitogen_kubectl.py
+ansible_mitogen/plugins/connection/mitogen_local.py
+ansible_mitogen/plugins/connection/mitogen_lxc.py
+ansible_mitogen/plugins/connection/mitogen_lxd.py
+ansible_mitogen/plugins/connection/mitogen_machinectl.py
+ansible_mitogen/plugins/connection/mitogen_setns.py
+ansible_mitogen/plugins/connection/mitogen_ssh.py
+ansible_mitogen/plugins/connection/mitogen_su.py
+ansible_mitogen/plugins/connection/mitogen_sudo.py
+ansible_mitogen/plugins/strategy/__init__.py
+ansible_mitogen/plugins/strategy/mitogen.py
+ansible_mitogen/plugins/strategy/mitogen_free.py
+ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
+ansible_mitogen/plugins/strategy/mitogen_linear.py
+mitogen/__init__.py
+mitogen/buildah.py
+mitogen/core.py
+mitogen/debug.py
+mitogen/doas.py
+mitogen/docker.py
+mitogen/fakessh.py
+mitogen/fork.py
+mitogen/jail.py
+mitogen/kubectl.py
+mitogen/lxc.py
+mitogen/lxd.py
+mitogen/master.py
+mitogen/minify.py
+mitogen/os_fork.py
+mitogen/parent.py
+mitogen/profiler.py
+mitogen/select.py
+mitogen/service.py
+mitogen/setns.py
+mitogen/ssh.py
+mitogen/su.py
+mitogen/sudo.py
+mitogen/unix.py
+mitogen/utils.py
+mitogen.egg-info/PKG-INFO
+mitogen.egg-info/SOURCES.txt
+mitogen.egg-info/dependency_links.txt
+mitogen.egg-info/not-zip-safe
+mitogen.egg-info/top_level.txt
+mitogen/compat/__init__.py
+mitogen/compat/pkgutil.py
+mitogen/compat/tokenize.py
\ No newline at end of file
diff --git a/deployment/lib/mitogen-0.2.9/mitogen.egg-info/dependency_links.txt b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/dependency_links.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/deployment/lib/mitogen-0.2.9/mitogen.egg-info/not-zip-safe b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/not-zip-safe
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/deployment/lib/mitogen-0.2.9/mitogen.egg-info/top_level.txt b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2360b3f0c5f3fd1b9dcd5e141f9bc4aab4ceac23
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen.egg-info/top_level.txt
@@ -0,0 +1,2 @@
+ansible_mitogen
+mitogen
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/__init__.py b/deployment/lib/mitogen-0.2.9/mitogen/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f18c5a900c63ef7cf7a6a1697f5915cc48972fea
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/__init__.py
@@ -0,0 +1,120 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+On the Mitogen master, this is imported from ``mitogen/__init__.py`` as would
+be expected. On the slave, it is built dynamically during startup.
+"""
+
+
+#: Library version as a tuple.
+__version__ = (0, 2, 9)
+
+
+#: This is :data:`False` in slave contexts. Previously it was used to prevent
+#: re-execution of :mod:`__main__` in single file programs, however that now
+#: happens automatically.
+is_master = True
+
+
+#: This is `0` in a master, otherwise it is the master-assigned ID unique to
+#: the slave context used for message routing.
+context_id = 0
+
+
+#: This is :data:`None` in a master, otherwise it is the master-assigned ID
+#: unique to the slave's parent context.
+parent_id = None
+
+
+#: This is an empty list in a master, otherwise it is a list of parent context
+#: IDs ordered from most direct to least direct.
+parent_ids = []
+
+
+# 'os' is imported only to read the environment flag below, then deleted so
+# it does not linger as a public attribute of the 'mitogen' package namespace.
+import os
+_default_profiling = os.environ.get('MITOGEN_PROFILING') is not None
+del os
+
+
+def main(log_level='INFO', profiling=_default_profiling):
+    """
+    Convenience decorator primarily useful for writing discardable test
+    scripts.
+
+    In the master process, when `func` is defined in the :mod:`__main__`
+    module, arranges for `func(router)` to be invoked immediately, with
+    :py:class:`mitogen.master.Router` construction and destruction handled just
+    as in :py:func:`mitogen.utils.run_with_router`. In slaves, this function
+    does nothing.
+
+    :param str log_level:
+        Logging package level to configure via
+        :py:func:`mitogen.utils.log_to_file`.
+
+    :param bool profiling:
+        If :py:data:`True`, equivalent to setting
+        :py:attr:`mitogen.master.Router.profiling` prior to router
+        construction. This causes ``/tmp`` files to be created everywhere at
+        the end of a successful run with :py:mod:`cProfile` output for every
+        thread.
+
+    Example:
+
+    ::
+
+        import mitogen
+        import requests
+
+        def get_url(url):
+            return requests.get(url).text
+
+        @mitogen.main()
+        def main(router):
+            z = router.ssh(hostname='k3')
+            print(z.call(get_url, 'https://example.org/'))
+
+    """
+
+    def wrapper(func):
+        # In slaves, func is not defined in __main__, so the decorator is a
+        # no-op there and the function is returned unexecuted.
+        if func.__module__ != '__main__':
+            return func
+        import mitogen.parent
+        import mitogen.utils
+        # NOTE(review): mitogen.core and mitogen.master are referenced below
+        # without explicit imports; presumably the two imports above pull them
+        # in as attributes of the 'mitogen' package -- TODO confirm.
+        if profiling:
+            mitogen.core.enable_profiling()
+            mitogen.master.Router.profiling = profiling
+        mitogen.utils.log_to_file(level=log_level)
+        return mitogen.core._profile_hook(
+            'app.main',
+            mitogen.utils.run_with_router,
+            func,
+        )
+    return wrapper
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/buildah.py b/deployment/lib/mitogen-0.2.9/mitogen/buildah.py
new file mode 100644
index 0000000000000000000000000000000000000000..f850234d66de99bc7e912805f7ce738d133cabce
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/buildah.py
@@ -0,0 +1,73 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Options(mitogen.parent.Options):
+    container = None
+    username = None
+    buildah_path = 'buildah'
+
+    def __init__(self, container=None, buildah_path=None, username=None,
+                 **kwargs):
+        super(Options, self).__init__(**kwargs)
+        assert container is not None
+        self.container = container
+        if buildah_path:
+            self.buildah_path = buildah_path
+        if username:
+            self.username = username
+
+
+class Connection(mitogen.parent.Connection):
+    options_class = Options
+    child_is_immediate_subprocess = False
+
+    # TODO: better way of capturing errors such as "No such container."
+    create_child_args = {
+        'merge_stdio': True
+    }
+
+    def _get_name(self):
+        return u'buildah.' + self.options.container
+
+    def get_boot_command(self):
+        args = [self.options.buildah_path, 'run']
+        if self.options.username:
+            args += ['--user=' + self.options.username]
+        args += ['--', self.options.container]
+        return args + super(Connection, self).get_boot_command()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/compat/__init__.py b/deployment/lib/mitogen-0.2.9/mitogen/compat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/compat/pkgutil.py b/deployment/lib/mitogen-0.2.9/mitogen/compat/pkgutil.py
new file mode 100644
index 0000000000000000000000000000000000000000..15eb2afa340045fb1ded64a7bf0d6ecf493cf4f3
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/compat/pkgutil.py
@@ -0,0 +1,594 @@
+"""Utilities to support packages."""
+
+# !mitogen: minify_safe
+
+# NOTE: This module must remain compatible with Python 2.3, as it is shared
+# by setuptools for distribution with Python 2.3 and up.
+
+import os
+import sys
+import imp
+import os.path
+from types import ModuleType
+
+__all__ = [
+    'get_importer', 'iter_importers', 'get_loader', 'find_loader',
+    'walk_packages', 'iter_modules', 'get_data',
+    'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
+]
+
+def read_code(stream):
+    # This helper is needed in order for the PEP 302 emulation to
+    # correctly handle compiled files
+    import marshal
+
+    magic = stream.read(4)
+    if magic != imp.get_magic():
+        return None
+
+    stream.read(4) # Skip timestamp
+    return marshal.load(stream)
+
+
+def simplegeneric(func):
+    """Make a trivial single-dispatch generic function"""
+    registry = {}
+    def wrapper(*args, **kw):
+        ob = args[0]
+        try:
+            cls = ob.__class__
+        except AttributeError:
+            cls = type(ob)
+        try:
+            mro = cls.__mro__
+        except AttributeError:
+            try:
+                class cls(cls, object):
+                    pass
+                mro = cls.__mro__[1:]
+            except TypeError:
+                mro = object,   # must be an ExtensionClass or some such  :(
+        for t in mro:
+            if t in registry:
+                return registry[t](*args, **kw)
+        else:
+            return func(*args, **kw)
+    try:
+        wrapper.__name__ = func.__name__
+    except (TypeError, AttributeError):
+        pass    # Python 2.3 doesn't allow functions to be renamed
+
+    def register(typ, func=None):
+        if func is None:
+            return lambda f: register(typ, f)
+        registry[typ] = func
+        return func
+
+    wrapper.__dict__ = func.__dict__
+    wrapper.__doc__ = func.__doc__
+    wrapper.register = register
+    return wrapper
+
+
+def walk_packages(path=None, prefix='', onerror=None):
+    """Yields (module_loader, name, ispkg) for all modules recursively
+    on path, or, if path is None, all accessible modules.
+
+    'path' should be either None or a list of paths to look for
+    modules in.
+
+    'prefix' is a string to output on the front of every module name
+    on output.
+
+    Note that this function must import all *packages* (NOT all
+    modules!) on the given path, in order to access the __path__
+    attribute to find submodules.
+
+    'onerror' is a function which gets called with one argument (the
+    name of the package which was being imported) if any exception
+    occurs while trying to import a package.  If no onerror function is
+    supplied, ImportErrors are caught and ignored, while all other
+    exceptions are propagated, terminating the search.
+
+    Examples:
+
+    # list all modules python can access
+    walk_packages()
+
+    # list all submodules of ctypes
+    walk_packages(ctypes.__path__, ctypes.__name__+'.')
+    """
+
+    def seen(p, m={}):
+        if p in m:
+            return True
+        m[p] = True
+
+    for importer, name, ispkg in iter_modules(path, prefix):
+        yield importer, name, ispkg
+
+        if ispkg:
+            try:
+                __import__(name)
+            except ImportError:
+                if onerror is not None:
+                    onerror(name)
+            except Exception:
+                if onerror is not None:
+                    onerror(name)
+                else:
+                    raise
+            else:
+                path = getattr(sys.modules[name], '__path__', None) or []
+
+                # don't traverse path items we've seen before
+                path = [p for p in path if not seen(p)]
+
+                for item in walk_packages(path, name+'.', onerror):
+                    yield item
+
+
+def iter_modules(path=None, prefix=''):
+    """Yields (module_loader, name, ispkg) for all submodules on path,
+    or, if path is None, all top-level modules on sys.path.
+
+    'path' should be either None or a list of paths to look for
+    modules in.
+
+    'prefix' is a string to output on the front of every module name
+    on output.
+    """
+
+    if path is None:
+        importers = iter_importers()
+    else:
+        importers = map(get_importer, path)
+
+    yielded = {}
+    for i in importers:
+        for name, ispkg in iter_importer_modules(i, prefix):
+            if name not in yielded:
+                yielded[name] = 1
+                yield i, name, ispkg
+
+
+#@simplegeneric
+def iter_importer_modules(importer, prefix=''):
+    if not hasattr(importer, 'iter_modules'):
+        return []
+    return importer.iter_modules(prefix)
+
+iter_importer_modules = simplegeneric(iter_importer_modules)
+
+
+class ImpImporter:
+    """PEP 302 Importer that wraps Python's "classic" import algorithm
+
+    ImpImporter(dirname) produces a PEP 302 importer that searches that
+    directory.  ImpImporter(None) produces a PEP 302 importer that searches
+    the current sys.path, plus any modules that are frozen or built-in.
+
+    Note that ImpImporter does not currently support being used by placement
+    on sys.meta_path.
+    """
+
+    def __init__(self, path=None):
+        self.path = path
+
+    def find_module(self, fullname, path=None):
+        # Note: we ignore 'path' argument since it is only used via meta_path
+        subname = fullname.split(".")[-1]
+        if subname != fullname and self.path is None:
+            return None
+        if self.path is None:
+            path = None
+        else:
+            path = [os.path.realpath(self.path)]
+        try:
+            file, filename, etc = imp.find_module(subname, path)
+        except ImportError:
+            return None
+        return ImpLoader(fullname, file, filename, etc)
+
+    def iter_modules(self, prefix=''):
+        if self.path is None or not os.path.isdir(self.path):
+            return
+
+        yielded = {}
+        import inspect
+        try:
+            filenames = os.listdir(self.path)
+        except OSError:
+            # ignore unreadable directories like import does
+            filenames = []
+        filenames.sort()  # handle packages before same-named modules
+
+        for fn in filenames:
+            modname = inspect.getmodulename(fn)
+            if modname=='__init__' or modname in yielded:
+                continue
+
+            path = os.path.join(self.path, fn)
+            ispkg = False
+
+            if not modname and os.path.isdir(path) and '.' not in fn:
+                modname = fn
+                try:
+                    dircontents = os.listdir(path)
+                except OSError:
+                    # ignore unreadable directories like import does
+                    dircontents = []
+                for fn in dircontents:
+                    subname = inspect.getmodulename(fn)
+                    if subname=='__init__':
+                        ispkg = True
+                        break
+                else:
+                    continue    # not a package
+
+            if modname and '.' not in modname:
+                yielded[modname] = 1
+                yield prefix + modname, ispkg
+
+
+class ImpLoader:
+    """PEP 302 Loader that wraps Python's "classic" import algorithm
+    """
+    code = source = None
+
+    def __init__(self, fullname, file, filename, etc):
+        self.file = file
+        self.filename = filename
+        self.fullname = fullname
+        self.etc = etc
+
+    def load_module(self, fullname):
+        self._reopen()
+        try:
+            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
+        finally:
+            if self.file:
+                self.file.close()
+        # Note: we don't set __loader__ because we want the module to look
+        # normal; i.e. this is just a wrapper for standard import machinery
+        return mod
+
+    def get_data(self, pathname):
+        return open(pathname, "rb").read()
+
+    def _reopen(self):
+        if self.file and self.file.closed:
+            mod_type = self.etc[2]
+            if mod_type==imp.PY_SOURCE:
+                self.file = open(self.filename, 'rU')
+            elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
+                self.file = open(self.filename, 'rb')
+
+    def _fix_name(self, fullname):
+        if fullname is None:
+            fullname = self.fullname
+        elif fullname != self.fullname:
+            raise ImportError("Loader for module %s cannot handle "
+                              "module %s" % (self.fullname, fullname))
+        return fullname
+
+    def is_package(self, fullname):
+        fullname = self._fix_name(fullname)
+        return self.etc[2]==imp.PKG_DIRECTORY
+
+    def get_code(self, fullname=None):
+        fullname = self._fix_name(fullname)
+        if self.code is None:
+            mod_type = self.etc[2]
+            if mod_type==imp.PY_SOURCE:
+                source = self.get_source(fullname)
+                self.code = compile(source, self.filename, 'exec')
+            elif mod_type==imp.PY_COMPILED:
+                self._reopen()
+                try:
+                    self.code = read_code(self.file)
+                finally:
+                    self.file.close()
+            elif mod_type==imp.PKG_DIRECTORY:
+                self.code = self._get_delegate().get_code()
+        return self.code
+
+    def get_source(self, fullname=None):
+        fullname = self._fix_name(fullname)
+        if self.source is None:
+            mod_type = self.etc[2]
+            if mod_type==imp.PY_SOURCE:
+                self._reopen()
+                try:
+                    self.source = self.file.read()
+                finally:
+                    self.file.close()
+            elif mod_type==imp.PY_COMPILED:
+                if os.path.exists(self.filename[:-1]):
+                    f = open(self.filename[:-1], 'rU')
+                    self.source = f.read()
+                    f.close()
+            elif mod_type==imp.PKG_DIRECTORY:
+                self.source = self._get_delegate().get_source()
+        return self.source
+
+
+    def _get_delegate(self):
+        return ImpImporter(self.filename).find_module('__init__')
+
+    def get_filename(self, fullname=None):
+        fullname = self._fix_name(fullname)
+        mod_type = self.etc[2]
+        if self.etc[2]==imp.PKG_DIRECTORY:
+            return self._get_delegate().get_filename()
+        elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
+            return self.filename
+        return None
+
+
+try:
+    import zipimport
+    from zipimport import zipimporter
+
+    def iter_zipimport_modules(importer, prefix=''):
+        dirlist = zipimport._zip_directory_cache[importer.archive].keys()
+        dirlist.sort()
+        _prefix = importer.prefix
+        plen = len(_prefix)
+        yielded = {}
+        import inspect
+        for fn in dirlist:
+            if not fn.startswith(_prefix):
+                continue
+
+            fn = fn[plen:].split(os.sep)
+
+            if len(fn)==2 and fn[1].startswith('__init__.py'):
+                if fn[0] not in yielded:
+                    yielded[fn[0]] = 1
+                    yield fn[0], True
+
+            if len(fn)!=1:
+                continue
+
+            modname = inspect.getmodulename(fn[0])
+            if modname=='__init__':
+                continue
+
+            if modname and '.' not in modname and modname not in yielded:
+                yielded[modname] = 1
+                yield prefix + modname, False
+
+    iter_importer_modules.register(zipimporter, iter_zipimport_modules)
+
+except ImportError:
+    pass
+
+
+def get_importer(path_item):
+    """Retrieve a PEP 302 importer for the given path item
+
+    The returned importer is cached in sys.path_importer_cache
+    if it was newly created by a path hook.
+
+    If there is no importer, a wrapper around the basic import
+    machinery is returned. This wrapper is never inserted into
+    the importer cache (None is inserted instead).
+
+    The cache (or part of it) can be cleared manually if a
+    rescan of sys.path_hooks is necessary.
+    """
+    try:
+        importer = sys.path_importer_cache[path_item]
+    except KeyError:
+        for path_hook in sys.path_hooks:
+            try:
+                importer = path_hook(path_item)
+                break
+            except ImportError:
+                pass
+        else:
+            importer = None
+        sys.path_importer_cache.setdefault(path_item, importer)
+
+    if importer is None:
+        try:
+            importer = ImpImporter(path_item)
+        except ImportError:
+            importer = None
+    return importer
+
+
+def iter_importers(fullname=""):
+    """Yield PEP 302 importers for the given module name
+
+    If fullname contains a '.', the importers will be for the package
+    containing fullname, otherwise they will be importers for sys.meta_path,
+    sys.path, and Python's "classic" import machinery, in that order.  If
+    the named module is in a package, that package is imported as a side
+    effect of invoking this function.
+
+    Non PEP 302 mechanisms (e.g. the Windows registry) used by the
+    standard import machinery to find files in alternative locations
+    are partially supported, but are searched AFTER sys.path. Normally,
+    these locations are searched BEFORE sys.path, preventing sys.path
+    entries from shadowing them.
+
+    For this to cause a visible difference in behaviour, there must
+    be a module or package name that is accessible via both sys.path
+    and one of the non PEP 302 file system mechanisms. In this case,
+    the emulation will find the former version, while the builtin
+    import mechanism will find the latter.
+
+    Items of the following types can be affected by this discrepancy:
+        imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
+    """
+    if fullname.startswith('.'):
+        raise ImportError("Relative module names not supported")
+    if '.' in fullname:
+        # Get the containing package's __path__
+        pkg = '.'.join(fullname.split('.')[:-1])
+        if pkg not in sys.modules:
+            __import__(pkg)
+        path = getattr(sys.modules[pkg], '__path__', None) or []
+    else:
+        for importer in sys.meta_path:
+            yield importer
+        path = sys.path
+    for item in path:
+        yield get_importer(item)
+    if '.' not in fullname:
+        yield ImpImporter()
+
+def get_loader(module_or_name):
+    """Get a PEP 302 "loader" object for module_or_name
+
+    If the module or package is accessible via the normal import
+    mechanism, a wrapper around the relevant part of that machinery
+    is returned.  Returns None if the module cannot be found or imported.
+    If the named module is not already imported, its containing package
+    (if any) is imported, in order to establish the package __path__.
+
+    This function uses iter_importers(), and is thus subject to the same
+    limitations regarding platform-specific special import locations such
+    as the Windows registry.
+    """
+    if module_or_name in sys.modules:
+        module_or_name = sys.modules[module_or_name]
+    if isinstance(module_or_name, ModuleType):
+        module = module_or_name
+        loader = getattr(module, '__loader__', None)
+        if loader is not None:
+            return loader
+        fullname = module.__name__
+    else:
+        fullname = module_or_name
+    return find_loader(fullname)
+
+def find_loader(fullname):
+    """Find a PEP 302 "loader" object for fullname
+
+    If fullname contains dots, path must be the containing package's __path__.
+    Returns None if the module cannot be found or imported. This function uses
+    iter_importers(), and is thus subject to the same limitations regarding
+    platform-specific special import locations such as the Windows registry.
+    """
+    for importer in iter_importers(fullname):
+        loader = importer.find_module(fullname)
+        if loader is not None:
+            return loader
+
+    return None
+
+
+def extend_path(path, name):
+    """Extend a package's path.
+
+    Intended use is to place the following code in a package's __init__.py:
+
+        from pkgutil import extend_path
+        __path__ = extend_path(__path__, __name__)
+
+    This will add to the package's __path__ all subdirectories of
+    directories on sys.path named after the package.  This is useful
+    if one wants to distribute different parts of a single logical
+    package as multiple directories.
+
+    It also looks for *.pkg files beginning where * matches the name
+    argument.  This feature is similar to *.pth files (see site.py),
+    except that it doesn't special-case lines starting with 'import'.
+    A *.pkg file is trusted at face value: apart from checking for
+    duplicates, all entries found in a *.pkg file are added to the
+    path, regardless of whether they exist on the filesystem.  (This
+    is a feature.)
+
+    If the input path is not a list (as is the case for frozen
+    packages) it is returned unchanged.  The input path is not
+    modified; an extended copy is returned.  Items are only appended
+    to the copy at the end.
+
+    It is assumed that sys.path is a sequence.  Items of sys.path that
+    are not (unicode or 8-bit) strings referring to existing
+    directories are ignored.  Unicode items of sys.path that cause
+    errors when used as filenames may cause this function to raise an
+    exception (in line with os.path.isdir() behavior).
+    """
+
+    if not isinstance(path, list):
+        # This could happen e.g. when this is called from inside a
+        # frozen package.  Return the path unchanged in that case.
+        return path
+
+    pname = os.path.join(*name.split('.')) # Reconstitute as relative path
+    # Just in case os.extsep != '.'
+    sname = os.extsep.join(name.split('.'))
+    sname_pkg = sname + os.extsep + "pkg"
+    init_py = "__init__" + os.extsep + "py"
+
+    path = path[:] # Start with a copy of the existing path
+
+    for dir in sys.path:
+        if not isinstance(dir, basestring) or not os.path.isdir(dir):
+            continue
+        subdir = os.path.join(dir, pname)
+        # XXX This may still add duplicate entries to path on
+        # case-insensitive filesystems
+        initfile = os.path.join(subdir, init_py)
+        if subdir not in path and os.path.isfile(initfile):
+            path.append(subdir)
+        # XXX Is this the right thing for subpackages like zope.app?
+        # It looks for a file named "zope.app.pkg"
+        pkgfile = os.path.join(dir, sname_pkg)
+        if os.path.isfile(pkgfile):
+            try:
+                f = open(pkgfile)
+            except IOError:
+                msg = sys.exc_info()[1]
+                sys.stderr.write("Can't open %s: %s\n" %
+                                 (pkgfile, msg))
+            else:
+                for line in f:
+                    line = line.rstrip('\n')
+                    if not line or line.startswith('#'):
+                        continue
+                    path.append(line) # Don't check for existence!
+                f.close()
+
+    return path
+
+def get_data(package, resource):
+    """Get a resource from a package.
+
+    This is a wrapper round the PEP 302 loader get_data API. The package
+    argument should be the name of a package, in standard module format
+    (foo.bar). The resource argument should be in the form of a relative
+    filename, using '/' as the path separator. The parent directory name '..'
+    is not allowed, and nor is a rooted name (starting with a '/').
+
+    The function returns a binary string, which is the contents of the
+    specified resource.
+
+    For packages located in the filesystem, which have already been imported,
+    this is the rough equivalent of
+
+        d = os.path.dirname(sys.modules[package].__file__)
+        data = open(os.path.join(d, resource), 'rb').read()
+
+    If the package cannot be located or loaded, or it uses a PEP 302 loader
+    which does not support get_data(), then None is returned.
+    """
+
+    loader = get_loader(package)
+    if loader is None or not hasattr(loader, 'get_data'):
+        return None
+    mod = sys.modules.get(package) or loader.load_module(package)
+    if mod is None or not hasattr(mod, '__file__'):
+        return None
+
+    # Modify the resource name to be compatible with the loader.get_data
+    # signature - an os.path format "filename" starting with the dirname of
+    # the package's __file__
+    parts = resource.split('/')
+    parts.insert(0, os.path.dirname(mod.__file__))
+    resource_name = os.path.join(*parts)
+    return loader.get_data(resource_name)
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/compat/tokenize.py b/deployment/lib/mitogen-0.2.9/mitogen/compat/tokenize.py
new file mode 100644
index 0000000000000000000000000000000000000000..0473c6a5c49086a94a4fc43c4ee4940988cff582
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/compat/tokenize.py
@@ -0,0 +1,453 @@
+"""Tokenization help for Python programs.
+
+generate_tokens(readline) is a generator that breaks a stream of
+text into Python tokens.  It accepts a readline-like method which is called
+repeatedly to get the next line of input (or "" for EOF).  It generates
+5-tuples with these members:
+
+    the token type (see token.py)
+    the token (a string)
+    the starting (row, column) indices of the token (a 2-tuple of ints)
+    the ending (row, column) indices of the token (a 2-tuple of ints)
+    the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators
+
+Older entry points
+    tokenize_loop(readline, tokeneater)
+    tokenize(readline, tokeneater=printtoken)
+are the same, except instead of generating tokens, tokeneater is a callback
+function to which the 5 fields described above are passed as 5 arguments,
+each time a new token is found."""
+
+# !mitogen: minify_safe
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+               'Skip Montanaro, Raymond Hettinger')
+
+from itertools import chain
+import string, re
+from token import *
+
+import token
+__all__ = [x for x in dir(token) if not x.startswith("_")]
+__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
+del token
+
+COMMENT = N_TOKENS
+tok_name[COMMENT] = 'COMMENT'
+NL = N_TOKENS + 1
+tok_name[NL] = 'NL'
+N_TOKENS += 2
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'[a-zA-Z_]\w*'
+
+Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
+Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
+Binnumber = r'0[bB][01]+[lL]?'
+Decnumber = r'[1-9]\d*[lL]?'
+Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?\d+'
+Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
+Expfloat = r'\d+' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+# Tail end of " string.
+Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+# Tail end of ''' string.
+Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+# Tail end of """ string.
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
+# Single-line ' or " string.
+String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+               r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+
+# Because of leftmost-then-longest match semantics, be sure to put the
+# longest operators first (e.g., if = came before ==, == would get
+# recognized as two instances of =).
+Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
+                 r"//=?",
+                 r"[+\-*/%&|^=<>]=?",
+                 r"~")
+
+Bracket = '[][(){}]'
+Special = group(r'\r?\n', r'[:;.,`@]')
+Funny = group(Operator, Bracket, Special)
+
+PlainToken = group(Number, Funny, String, Name)
+Token = Ignore + PlainToken
+
+# First (or only) line of ' or " string.
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+                group("'", r'\\\r?\n'),
+                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+                group('"', r'\\\r?\n'))
+PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
+PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+
+tokenprog, pseudoprog, single3prog, double3prog = map(
+    re.compile, (Token, PseudoToken, Single3, Double3))
+endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+            "'''": single3prog, '"""': double3prog,
+            "r'''": single3prog, 'r"""': double3prog,
+            "u'''": single3prog, 'u"""': double3prog,
+            "ur'''": single3prog, 'ur"""': double3prog,
+            "R'''": single3prog, 'R"""': double3prog,
+            "U'''": single3prog, 'U"""': double3prog,
+            "uR'''": single3prog, 'uR"""': double3prog,
+            "Ur'''": single3prog, 'Ur"""': double3prog,
+            "UR'''": single3prog, 'UR"""': double3prog,
+            "b'''": single3prog, 'b"""': double3prog,
+            "br'''": single3prog, 'br"""': double3prog,
+            "B'''": single3prog, 'B"""': double3prog,
+            "bR'''": single3prog, 'bR"""': double3prog,
+            "Br'''": single3prog, 'Br"""': double3prog,
+            "BR'''": single3prog, 'BR"""': double3prog,
+            'r': None, 'R': None, 'u': None, 'U': None,
+            'b': None, 'B': None}
+
+triple_quoted = {}
+for t in ("'''", '"""',
+          "r'''", 'r"""', "R'''", 'R"""',
+          "u'''", 'u"""', "U'''", 'U"""',
+          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+          "uR'''", 'uR"""', "UR'''", 'UR"""',
+          "b'''", 'b"""', "B'''", 'B"""',
+          "br'''", 'br"""', "Br'''", 'Br"""',
+          "bR'''", 'bR"""', "BR'''", 'BR"""'):
+    triple_quoted[t] = t
+single_quoted = {}
+for t in ("'", '"',
+          "r'", 'r"', "R'", 'R"',
+          "u'", 'u"', "U'", 'U"',
+          "ur'", 'ur"', "Ur'", 'Ur"',
+          "uR'", 'uR"', "UR'", 'UR"',
+          "b'", 'b"', "B'", 'B"',
+          "br'", 'br"', "Br'", 'Br"',
+          "bR'", 'bR"', "BR'", 'BR"' ):
+    single_quoted[t] = t
+
+tabsize = 8
+
+class TokenError(Exception): pass
+
+class StopTokenizing(Exception): pass
+
+def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
+    srow, scol = srow_scol
+    erow, ecol = erow_ecol
+    print("%d,%d-%d,%d:\t%s\t%s" % \
+        (srow, scol, erow, ecol, tok_name[type], repr(token)))
+
+def tokenize(readline, tokeneater=printtoken):
+    """
+    The tokenize() function accepts two parameters: one representing the
+    input stream, and one providing an output mechanism for tokenize().
+
+    The first parameter, readline, must be a callable object which provides
+    the same interface as the readline() method of built-in file objects.
+    Each call to the function should return one line of input as a string.
+
+    The second parameter, tokeneater, must also be a callable object. It is
+    called once for each token, with five arguments, corresponding to the
+    tuples generated by generate_tokens().
+    """
+    try:
+        tokenize_loop(readline, tokeneater)
+    except StopTokenizing:
+        pass
+
+# backwards compatible interface
+def tokenize_loop(readline, tokeneater):
+    for token_info in generate_tokens(readline):
+        tokeneater(*token_info)
+
+class Untokenizer:
+    """
+    Reassembles source text from a token stream. Full 5-tuple tokens are
+    placed back at their recorded (row, col) positions; bare 2-tuple tokens
+    fall through to compat(), which can only approximate the spacing.
+    """
+
+    def __init__(self):
+        # Accumulated output fragments; joined in untokenize().
+        self.tokens = []
+        # Position of the end of the last emitted token (1-based row).
+        self.prev_row = 1
+        self.prev_col = 0
+
+    def add_whitespace(self, start):
+        """Emit filler (backslash-newlines and spaces) up to position `start`."""
+        row, col = start
+        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
+            raise ValueError("start ({},{}) precedes previous end ({},{})"
+                             .format(row, col, self.prev_row, self.prev_col))
+        row_offset = row - self.prev_row
+        if row_offset:
+            # Bridge skipped rows with explicit line continuations.
+            self.tokens.append("\\\n" * row_offset)
+            self.prev_col = 0
+        col_offset = col - self.prev_col
+        if col_offset:
+            self.tokens.append(" " * col_offset)
+
+    def untokenize(self, iterable):
+        """Return source text for `iterable` of tokens; see class docstring."""
+        it = iter(iterable)
+        indents = []
+        startline = False
+        for t in it:
+            if len(t) == 2:
+                # Position info missing: switch to the approximate mode for
+                # this and all remaining tokens.
+                self.compat(t, it)
+                break
+            tok_type, token, start, end, line = t
+            if tok_type == ENDMARKER:
+                break
+            if tok_type == INDENT:
+                indents.append(token)
+                continue
+            elif tok_type == DEDENT:
+                indents.pop()
+                self.prev_row, self.prev_col = end
+                continue
+            elif tok_type in (NEWLINE, NL):
+                startline = True
+            elif startline and indents:
+                # First token on a fresh line: re-emit the current indent
+                # string if the token does not already start past it.
+                indent = indents[-1]
+                if start[1] >= len(indent):
+                    self.tokens.append(indent)
+                    self.prev_col = len(indent)
+                startline = False
+            self.add_whitespace(start)
+            self.tokens.append(token)
+            self.prev_row, self.prev_col = end
+            if tok_type in (NEWLINE, NL):
+                self.prev_row += 1
+                self.prev_col = 0
+        return "".join(self.tokens)
+
+    def compat(self, token, iterable):
+        """Approximate untokenization for 2-tuple (type, string) tokens."""
+        indents = []
+        toks_append = self.tokens.append
+        startline = token[0] in (NEWLINE, NL)
+        prevstring = False
+
+        for tok in chain([token], iterable):
+            toknum, tokval = tok[:2]
+
+            if toknum in (NAME, NUMBER):
+                tokval += ' '
+
+            # Insert a space between two consecutive strings
+            if toknum == STRING:
+                if prevstring:
+                    tokval = ' ' + tokval
+                prevstring = True
+            else:
+                prevstring = False
+
+            if toknum == INDENT:
+                indents.append(tokval)
+                continue
+            elif toknum == DEDENT:
+                indents.pop()
+                continue
+            elif toknum in (NEWLINE, NL):
+                startline = True
+            elif startline and indents:
+                toks_append(indents[-1])
+                startline = False
+            toks_append(tokval)
+
+def untokenize(iterable):
+    """Transform tokens back into Python source code.
+
+    Each element returned by the iterable must be a token sequence
+    with at least two elements, a token number and token value.  If
+    only two tokens are passed, the resulting output is poor.
+
+    Round-trip invariant for full input:
+        Untokenized source will match input source exactly
+
+    Round-trip invariant for limited input:
+        # Output text will tokenize back to the input
+        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
+        newcode = untokenize(t1)
+        readline = iter(newcode.splitlines(1)).next
+        t2 = [tok[:2] for tok in generate_tokens(readline)]
+        assert t1 == t2
+    """
+    ut = Untokenizer()
+    return ut.untokenize(iterable)
+
+def generate_tokens(readline):
+    """
+    The generate_tokens() generator requires one argument, readline, which
+    must be a callable object which provides the same interface as the
+    readline() method of built-in file objects. Each call to the function
+    should return one line of input as a string.  Alternately, readline
+    can be a callable function terminating with StopIteration:
+        readline = open(myfile).next    # Example of alternate readline
+
+    The generator produces 5-tuples with these members: the token type; the
+    token string; a 2-tuple (srow, scol) of ints specifying the row and
+    column where the token begins in the source; a 2-tuple (erow, ecol) of
+    ints specifying the row and column where the token ends in the source;
+    and the line on which the token was found. The line passed is the
+    logical line; continuation lines are included.
+    """
+    lnum = parenlev = continued = 0
+    namechars, numchars = string.ascii_letters + '_', '0123456789'
+    contstr, needcont = '', 0
+    contline = None
+    indents = [0]
+
+    # NOTE: strstart/endprog are assigned when a multi-line string starts and
+    # read on subsequent loop iterations while `contstr` is truthy.
+    while 1:                                   # loop over lines in stream
+        try:
+            line = readline()
+        except StopIteration:
+            line = ''
+        lnum += 1
+        pos, max = 0, len(line)
+
+        if contstr:                            # continued string
+            if not line:
+                raise TokenError("EOF in multi-line string", strstart)
+            endmatch = endprog.match(line)
+            if endmatch:
+                pos = end = endmatch.end(0)
+                yield (STRING, contstr + line[:end],
+                       strstart, (lnum, end), contline + line)
+                contstr, needcont = '', 0
+                contline = None
+            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+                yield (ERRORTOKEN, contstr + line,
+                           strstart, (lnum, len(line)), contline)
+                contstr = ''
+                contline = None
+                continue
+            else:
+                contstr = contstr + line
+                contline = contline + line
+                continue
+
+        elif parenlev == 0 and not continued:  # new statement
+            if not line: break
+            column = 0
+            while pos < max:                   # measure leading whitespace
+                if line[pos] == ' ':
+                    column += 1
+                elif line[pos] == '\t':
+                    column = (column//tabsize + 1)*tabsize
+                elif line[pos] == '\f':
+                    column = 0
+                else:
+                    break
+                pos += 1
+            if pos == max:
+                break
+
+            if line[pos] in '#\r\n':           # skip comments or blank lines
+                if line[pos] == '#':
+                    comment_token = line[pos:].rstrip('\r\n')
+                    nl_pos = pos + len(comment_token)
+                    yield (COMMENT, comment_token,
+                           (lnum, pos), (lnum, pos + len(comment_token)), line)
+                    yield (NL, line[nl_pos:],
+                           (lnum, nl_pos), (lnum, len(line)), line)
+                else:
+                    # line[pos] is '\r' or '\n' here, so the index always
+                    # selects NL from the pair.
+                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
+                           (lnum, pos), (lnum, len(line)), line)
+                continue
+
+            if column > indents[-1]:           # count indents or dedents
+                indents.append(column)
+                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+            while column < indents[-1]:
+                if column not in indents:
+                    raise IndentationError(
+                        "unindent does not match any outer indentation level",
+                        ("<tokenize>", lnum, pos, line))
+                indents = indents[:-1]
+                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
+
+        else:                                  # continued statement
+            if not line:
+                raise TokenError("EOF in multi-line statement", (lnum, 0))
+            continued = 0
+
+        while pos < max:
+            pseudomatch = pseudoprog.match(line, pos)
+            if pseudomatch:                                # scan for tokens
+                start, end = pseudomatch.span(1)
+                spos, epos, pos = (lnum, start), (lnum, end), end
+                if start == end:
+                    continue
+                token, initial = line[start:end], line[start]
+
+                if initial in numchars or \
+                   (initial == '.' and token != '.'):      # ordinary number
+                    yield (NUMBER, token, spos, epos, line)
+                elif initial in '\r\n':
+                    if parenlev > 0:
+                        n = NL
+                    else:
+                        n = NEWLINE
+                    yield (n, token, spos, epos, line)
+                elif initial == '#':
+                    assert not token.endswith("\n")
+                    yield (COMMENT, token, spos, epos, line)
+                elif token in triple_quoted:
+                    endprog = endprogs[token]
+                    endmatch = endprog.match(line, pos)
+                    if endmatch:                           # all on one line
+                        pos = endmatch.end(0)
+                        token = line[start:pos]
+                        yield (STRING, token, spos, (lnum, pos), line)
+                    else:
+                        strstart = (lnum, start)           # multiple lines
+                        contstr = line[start:]
+                        contline = line
+                        break
+                elif initial in single_quoted or \
+                    token[:2] in single_quoted or \
+                    token[:3] in single_quoted:
+                    if token[-1] == '\n':                  # continued string
+                        strstart = (lnum, start)
+                        endprog = (endprogs[initial] or endprogs[token[1]] or
+                                   endprogs[token[2]])
+                        contstr, needcont = line[start:], 1
+                        contline = line
+                        break
+                    else:                                  # ordinary string
+                        yield (STRING, token, spos, epos, line)
+                elif initial in namechars:                 # ordinary name
+                    yield (NAME, token, spos, epos, line)
+                elif initial == '\\':                      # continued stmt
+                    continued = 1
+                else:
+                    if initial in '([{':
+                        parenlev += 1
+                    elif initial in ')]}':
+                        parenlev -= 1
+                    yield (OP, token, spos, epos, line)
+            else:
+                yield (ERRORTOKEN, line[pos],
+                           (lnum, pos), (lnum, pos+1), line)
+                pos += 1
+
+    for indent in indents[1:]:                 # pop remaining indent levels
+        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
+    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
+if __name__ == '__main__':                     # testing
+    # Tokenize a file named on the command line, or stdin, printing tokens.
+    import sys
+    if len(sys.argv) > 1:
+        tokenize(open(sys.argv[1]).readline)
+    else:
+        tokenize(sys.stdin.readline)
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/core.py b/deployment/lib/mitogen-0.2.9/mitogen/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8c57ba7802441ba9787cd16b5a1c7dc344710cb
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/core.py
@@ -0,0 +1,3997 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+This module implements most package functionality, but remains separate from
+non-essential code in order to reduce its size, since it also serves as the
+bootstrap implementation sent to every new slave context.
+"""
+
+import binascii
+import collections
+import encodings.latin_1
+import encodings.utf_8
+import errno
+import fcntl
+import itertools
+import linecache
+import logging
+import os
+import pickle as py_pickle
+import pstats
+import signal
+import socket
+import struct
+import sys
+import syslog
+import threading
+import time
+import traceback
+import warnings
+import weakref
+import zlib
+
+# Python >3.7 deprecated the imp module.
+warnings.filterwarnings('ignore', message='the imp module is deprecated')
+import imp
+
+# Absolute imports for <2.5.
+select = __import__('select')
+
+try:
+    import cProfile
+except ImportError:
+    cProfile = None
+
+try:
+    import thread
+except ImportError:
+    import threading as thread
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+try:
+    from cStringIO import StringIO as BytesIO
+except ImportError:
+    from io import BytesIO
+
+try:
+    BaseException
+except NameError:
+    BaseException = Exception
+
+try:
+    ModuleNotFoundError
+except NameError:
+    ModuleNotFoundError = ImportError
+
+# TODO: usage of 'import' after setting __name__, but before fixing up
+# sys.modules generates a warning. This happens when profiling = True.
+warnings.filterwarnings('ignore',
+    "Parent module 'mitogen' not found while handling absolute import")
+
+LOG = logging.getLogger('mitogen')
+IOLOG = logging.getLogger('mitogen.io')
+IOLOG.setLevel(logging.INFO)
+
+# str.encode() may take import lock. Deadlock possible if broker calls
+# .encode() on behalf of thread currently waiting for module.
+LATIN1_CODEC = encodings.latin_1.Codec()
+
+# Verbosity flags; flipped to True by enable_debug_logging().
+_v = False
+_vv = False
+
+# Message handle numbers for the built-in services.
+GET_MODULE = 100
+CALL_FUNCTION = 101
+FORWARD_LOG = 102
+ADD_ROUTE = 103
+DEL_ROUTE = 104
+ALLOCATE_ID = 105
+SHUTDOWN = 106
+LOAD_MODULE = 107
+FORWARD_MODULE = 108
+DETACHING = 109
+CALL_SERVICE = 110
+STUB_CALL_SERVICE = 111
+
+#: Special value used to signal disconnection or the inability to route a
+#: message, when it appears in the `reply_to` field. Usually causes
+#: :class:`mitogen.core.ChannelError` to be raised when it is received.
+#:
+#: It indicates the sender did not know how to process the message, or wishes
+#: no further messages to be delivered to it. It is used when:
+#:
+#:  * a remote receiver is disconnected or explicitly closed.
+#:  * a related message could not be delivered due to no route existing for it.
+#:  * a router is being torn down, as a sentinel value to notify
+#:    :meth:`mitogen.core.Router.add_handler` callbacks to clean up.
+IS_DEAD = 999
+
+# NOTE(review): duplicate of the identical BaseException shim earlier in this
+# module; harmless but redundant.
+try:
+    BaseException
+except NameError:
+    BaseException = Exception
+
+# Python 2/3 compatibility aliases used throughout the module.
+PY24 = sys.version_info < (2, 5)
+PY3 = sys.version_info > (3,)
+if PY3:
+    b = str.encode
+    BytesType = bytes
+    UnicodeType = str
+    FsPathTypes = (str,)
+    BufferType = lambda buf, start: memoryview(buf)[start:]
+    long = int
+else:
+    b = str
+    BytesType = str
+    FsPathTypes = (str, unicode)
+    BufferType = buffer
+    UnicodeType = unicode
+
+AnyTextType = (BytesType, UnicodeType)
+
+# Python 2.4 lacked the next() builtin.
+try:
+    next
+except NameError:
+    next = lambda it: it.next()
+
+# #550: prehistoric WSL did not advertise itself in uname output.
+try:
+    fp = open('/proc/sys/kernel/osrelease')
+    IS_WSL = 'Microsoft' in fp.read()
+    fp.close()
+except IOError:
+    IS_WSL = False
+
+
+#: Default size for calls to :meth:`Side.read` or :meth:`Side.write`, and the
+#: size of buffers configured by :func:`mitogen.parent.create_socketpair`. This
+#: value has many performance implications, 128KiB seems to be a sweet spot.
+#:
+#: * When set low, large messages cause many :class:`Broker` IO loop
+#:   iterations, burning CPU and reducing throughput.
+#: * When set high, excessive RAM is reserved by the OS for socket buffers (2x
+#:   per child), and an identically sized temporary userspace buffer is
+#:   allocated on each read that requires zeroing, and over a particular size
+#:   may require two system calls to allocate/deallocate.
+#:
+#: Care must be taken to ensure the underlying kernel object and receiving
+#: program support the desired size. For example,
+#:
+#: * Most UNIXes have TTYs with fixed 2KiB-4KiB buffers, making them unsuitable
+#:   for efficient IO.
+#: * Different UNIXes have varying presets for pipes, which may not be
+#:   configurable. On recent Linux the default pipe buffer size is 64KiB, but
+#:   under memory pressure may be as low as 4KiB for unprivileged processes.
+#: * When communication is via an intermediary process, its internal buffers
+#:   effect the speed OS buffers will drain. For example OpenSSH uses 64KiB
+#:   reads.
+#:
+#: An ideal :class:`Message` has a size that is a multiple of
+#: :data:`CHUNK_SIZE` inclusive of headers, to avoid wasting IO loop iterations
+#: writing small trailer chunks.
+CHUNK_SIZE = 131072
+
+# Per-thread state (e.g. latch bookkeeping) shared across the module.
+_tls = threading.local()
+
+
+if __name__ == 'mitogen.core':
+    # When loaded using import mechanism, ExternalContext.main() will not have
+    # a chance to set the synthetic mitogen global, so just import it here.
+    import mitogen
+else:
+    # When loaded as __main__, ensure classes and functions gain a __module__
+    # attribute consistent with the host process, so that pickling succeeds.
+    __name__ = 'mitogen.core'
+
+
+class Error(Exception):
+    """
+    Base for all exceptions raised by Mitogen.
+
+    :param str fmt:
+        Exception text, or format string if `args` is non-empty.
+    :param tuple args:
+        Format string arguments.
+    """
+    def __init__(self, fmt=None, *args):
+        if args:
+            fmt %= args
+        # Coerce byte strings to text so the stored message is always Unicode.
+        if fmt and not isinstance(fmt, UnicodeType):
+            fmt = fmt.decode('utf-8')
+        Exception.__init__(self, fmt)
+
+
+class LatchError(Error):
+    """
+    Raised when an attempt is made to use a :class:`mitogen.core.Latch` that
+    has been marked closed.
+    """
+    pass
+
+
+class Blob(BytesType):
+    """
+    A serializable bytes subclass whose content is summarized in repr() output,
+    making it suitable for logging binary data.
+    """
+    def __repr__(self):
+        return '[blob: %d bytes]' % len(self)
+
+    def __reduce__(self):
+        # Pickle as a plain bytes payload so the subclass round-trips.
+        return (Blob, (BytesType(self),))
+
+
+class Secret(UnicodeType):
+    """
+    A serializable unicode subclass whose content is masked in repr() output,
+    making it suitable for logging passwords.
+    """
+    def __repr__(self):
+        return '[secret]'
+
+    if not PY3:
+        # TODO: what is this needed for in 2.x?
+        def __str__(self):
+            return UnicodeType(self)
+
+    def __reduce__(self):
+        # Pickle as a plain unicode payload so the subclass round-trips.
+        return (Secret, (UnicodeType(self),))
+
+
+class Kwargs(dict):
+    """
+    A serializable dict subclass that indicates its keys should be coerced to
+    Unicode on Python 3 and bytes on Python<2.6.
+
+    Python 2 produces keyword argument dicts whose keys are bytes, requiring a
+    helper to ensure compatibility with Python 3 where Unicode is required,
+    whereas Python 3 produces keyword argument dicts whose keys are Unicode,
+    requiring a helper for Python 2.4/2.5, where bytes are required.
+    """
+    if PY3:
+        def __init__(self, dct):
+            # Decode any bytes keys received from a Python 2 sender.
+            for k, v in dct.items():
+                if type(k) is bytes:
+                    self[k.decode()] = v
+                else:
+                    self[k] = v
+    elif sys.version_info < (2, 6, 5):
+        def __init__(self, dct):
+            # Encode unicode keys to UTF-8 bytes for old Python 2 kwargs.
+            for k, v in dct.iteritems():
+                if type(k) is unicode:
+                    k, _ = encodings.utf_8.encode(k)
+                self[k] = v
+
+    def __repr__(self):
+        return 'Kwargs(%s)' % (dict.__repr__(self),)
+
+    def __reduce__(self):
+        return (Kwargs, (dict(self),))
+
+
+class CallError(Error):
+    """
+    Serializable :class:`Error` subclass raised when :meth:`Context.call()
+    <mitogen.parent.Context.call>` fails. A copy of the traceback from the
+    external context is appended to the exception message.
+    """
+    def __init__(self, fmt=None, *args):
+        if not isinstance(fmt, BaseException):
+            Error.__init__(self, fmt, *args)
+        else:
+            # Constructed from an exception: render "module.Class: message"
+            # and append the active traceback, if any.
+            e = fmt
+            cls = e.__class__
+            fmt = '%s.%s: %s' % (cls.__module__, cls.__name__, e)
+            tb = sys.exc_info()[2]
+            if tb:
+                fmt += '\n'
+                fmt += ''.join(traceback.format_tb(tb))
+            Error.__init__(self, fmt)
+
+    def __reduce__(self):
+        # Pickle only the rendered message; rebuilt via _unpickle_call_error.
+        return (_unpickle_call_error, (self.args[0],))
+
+
+def _unpickle_call_error(s):
+    """Rebuild a CallError from its pickled message, rejecting non-text or oversized input."""
+    if not (type(s) is UnicodeType and len(s) < 10000):
+        raise TypeError('cannot unpickle CallError: bad input')
+    return CallError(s)
+
+
+class ChannelError(Error):
+    """
+    Raised when a channel dies or has been closed.
+    """
+    # Message templates selected by which end initiated the close.
+    remote_msg = 'Channel closed by remote end.'
+    local_msg = 'Channel closed by local end.'
+
+
+class StreamError(Error):
+    """
+    Raised when a stream cannot be established.
+    """
+    pass
+
+
+class TimeoutError(Error):
+    """
+    Raised when a timeout occurs on a stream.
+    """
+    pass
+
+
+def to_text(o):
+    """
+    Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of
+    :class:`bytes`, otherwise pass it to the :class:`str` constructor. The
+    returned object is always a plain :class:`str`, any subclass is removed.
+    """
+    if isinstance(o, BytesType):
+        return o.decode('utf-8')
+    return UnicodeType(o)
+
+
+# Documented in api.rst to work around Sphinx limitation.
+now = getattr(time, 'monotonic', time.time)
+
+
+# Python 2.4
+try:
+    any
+except NameError:
+    def any(it):
+        # NOTE: returns None (not False) when no element is truthy; both are
+        # falsy, so callers behave the same.
+        for elem in it:
+            if elem:
+                return True
+
+
+def _partition(s, sep, find):
+    """
+    (str|unicode).(partition|rpartition) for Python 2.4/2.5.
+
+    Returns (left, sep, right) when `sep` is found by `find`, else None.
+    """
+    idx = find(sep)
+    if idx != -1:
+        left = s[0:idx]
+        return left, sep, s[len(left)+len(sep):]
+
+
+# Use the native methods where available (2.6+), else fall back to shims
+# that mimic partition/rpartition's not-found return shapes.
+if hasattr(UnicodeType, 'rpartition'):
+    str_partition = UnicodeType.partition
+    str_rpartition = UnicodeType.rpartition
+    bytes_partition = BytesType.partition
+else:
+    def str_partition(s, sep):
+        return _partition(s, sep, s.find) or (s, u'', u'')
+    def str_rpartition(s, sep):
+        return _partition(s, sep, s.rfind) or (u'', u'', s)
+    def bytes_partition(s, sep):
+        return _partition(s, sep, s.find) or (s, '', '')
+
+
+def _has_parent_authority(context_id):
+    """Return True if `context_id` is this context or one of its parents."""
+    return (
+        (context_id == mitogen.context_id) or
+        (context_id in mitogen.parent_ids)
+    )
+
+def has_parent_authority(msg, _stream=None):
+    """
+    Policy function for use with :class:`Receiver` and
+    :meth:`Router.add_handler` that requires incoming messages to originate
+    from a parent context, or on a :class:`Stream` whose :attr:`auth_id
+    <Stream.auth_id>` has been set to that of a parent context or the current
+    context.
+    """
+    return _has_parent_authority(msg.auth_id)
+
+
+def _signals(obj, signal):
+    """Return (creating if necessary) the listener list for `signal` on `obj`."""
+    return (
+        obj.__dict__
+        .setdefault('_signals', {})
+        .setdefault(signal, [])
+    )
+
+
+def listen(obj, name, func):
+    """
+    Arrange for `func()` to be invoked when signal `name` is fired on `obj`.
+    """
+    _signals(obj, name).append(func)
+
+
+def unlisten(obj, name, func):
+    """
+    Remove `func()` from the list of functions invoked when signal `name` is
+    fired by `obj`.
+
+    :raises ValueError:
+        `func()` was not on the list.
+    """
+    _signals(obj, name).remove(func)
+
+
+def fire(obj, name, *args, **kwargs):
+    """
+    Arrange for `func(*args, **kwargs)` to be invoked for every function
+    registered for signal `name` on `obj`.
+    """
+    for func in _signals(obj, name):
+        func(*args, **kwargs)
+
+
+def takes_econtext(func):
+    """
+    Decorator that marks a function or class method to automatically receive a
+    kwarg named `econtext`, referencing the
+    :class:`mitogen.core.ExternalContext` active in the context in which the
+    function is being invoked in. The decorator is only meaningful when the
+    function is invoked via :data:`CALL_FUNCTION <mitogen.core.CALL_FUNCTION>`.
+
+    When the function is invoked directly, `econtext` must still be passed to
+    it explicitly.
+    """
+    # Marker attribute only; the dispatcher checks for it at call time.
+    func.mitogen_takes_econtext = True
+    return func
+
+
+def takes_router(func):
+    """
+    Decorator that marks a function or class method to automatically receive a
+    kwarg named `router`, referencing the :class:`mitogen.core.Router` active
+    in the context in which the function is being invoked in. The decorator is
+    only meaningful when the function is invoked via :data:`CALL_FUNCTION
+    <mitogen.core.CALL_FUNCTION>`.
+
+    When the function is invoked directly, `router` must still be passed to it
+    explicitly.
+    """
+    # Marker attribute only; the dispatcher checks for it at call time.
+    func.mitogen_takes_router = True
+    return func
+
+
+def is_blacklisted_import(importer, fullname):
+    """
+    Return :data:`True` if `fullname` is part of a blacklisted package, or if
+    any packages have been whitelisted and `fullname` is not part of one.
+
+    NB:
+      - If a package is on both lists, then it is treated as blacklisted.
+      - If any package is whitelisted, then all non-whitelisted packages are
+        treated as blacklisted.
+    """
+    return ((not any(fullname.startswith(s) for s in importer.whitelist)) or
+                (any(fullname.startswith(s) for s in importer.blacklist)))
+
+
+def set_cloexec(fd):
+    """
+    Set the file descriptor `fd` to automatically close on :func:`os.execve`.
+    This has no effect on file descriptors inherited across :func:`os.fork`,
+    they must be explicitly closed through some other means, such as
+    :func:`mitogen.fork.on_fork`.
+    """
+    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+    # Guard against accidentally marking stdio descriptors close-on-exec.
+    # NOTE(review): the assert runs after F_GETFD, so fd<=2 still performs
+    # one fcntl call before failing.
+    assert fd > 2, 'fd %r <= 2' % (fd,)
+    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+
+
+def set_nonblock(fd):
+    """
+    Set the file descriptor `fd` to non-blocking mode. For most underlying file
+    types, this causes :func:`os.read` or :func:`os.write` to raise
+    :class:`OSError` with :data:`errno.EAGAIN` rather than block the thread
+    when the underlying kernel buffer is exhausted.
+    """
+    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+
+def set_block(fd):
+    """
+    Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread when
+    the underlying kernel buffer is exhausted.
+    """
+    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
+
+
+def io_op(func, *args):
+    """
+    Wrap `func(*args)` that may raise :class:`select.error`, :class:`IOError`,
+    or :class:`OSError`, trapping UNIX error codes relating to disconnection
+    and retry events in various subsystems:
+
+    * When a signal is delivered to the process on Python 2, system call retry
+      is signalled through :data:`errno.EINTR`. The invocation is automatically
+      restarted.
+    * When performing IO against a TTY, disconnection of the remote end is
+      signalled by :data:`errno.EIO`.
+    * When performing IO against a socket, disconnection of the remote end is
+      signalled by :data:`errno.ECONNRESET`.
+    * When performing IO against a pipe, disconnection of the remote end is
+      signalled by :data:`errno.EPIPE`.
+
+    :returns:
+        Tuple of `(return_value, disconnect_reason)`, where `return_value` is
+        the return value of `func(*args)`, and `disconnect_reason` is an
+        exception instance when disconnection was detected, otherwise
+        :data:`None`.
+    """
+    while True:
+        try:
+            return func(*args), None
+        except (select.error, OSError, IOError):
+            e = sys.exc_info()[1]
+            _vv and IOLOG.debug('io_op(%r) -> OSError: %s', func, e)
+            if e.args[0] == errno.EINTR:
+                continue
+            if e.args[0] in (errno.EIO, errno.ECONNRESET, errno.EPIPE):
+                return None, e
+            raise
+
+
+class PidfulStreamHandler(logging.StreamHandler):
+    """
+    A :class:`logging.StreamHandler` subclass used when
+    :meth:`Router.enable_debug() <mitogen.master.Router.enable_debug>` has been
+    called, or the `debug` parameter was specified during context construction.
+    Verifies the process ID has not changed on each call to :meth:`emit`,
+    reopening the associated log file when a change is detected.
+
+    This ensures logging to the per-process output files happens correctly even
+    when uncooperative third party components call :func:`os.fork`.
+    """
+    #: PID that last opened the log file.
+    open_pid = None
+
+    #: Output path template.
+    template = '/tmp/mitogen.%s.%s.log'
+
+    def _reopen(self):
+        self.acquire()
+        try:
+            # Double-check under the handler lock: another thread may have
+            # already reopened the stream for this PID.
+            if self.open_pid == os.getpid():
+                return
+            ts = time.strftime('%Y%m%d_%H%M%S')
+            path = self.template % (os.getpid(), ts)
+            self.stream = open(path, 'w', 1)
+            set_cloexec(self.stream.fileno())
+            self.stream.write('Parent PID: %s\n' % (os.getppid(),))
+            self.stream.write('Created by:\n\n%s\n' % (
+                ''.join(traceback.format_stack()),
+            ))
+            self.open_pid = os.getpid()
+        finally:
+            self.release()
+
+    def emit(self, record):
+        # Reopen the log file when emitting from a freshly forked process.
+        if self.open_pid != os.getpid():
+            self._reopen()
+        logging.StreamHandler.emit(self, record)
+
+
+def enable_debug_logging():
+    """
+    Flip the module verbosity flags and install a DEBUG-level
+    :class:`PidfulStreamHandler` at the front of the root logger's handlers.
+    """
+    global _v, _vv
+    _v = True
+    _vv = True
+    root = logging.getLogger()
+    root.setLevel(logging.DEBUG)
+    IOLOG.setLevel(logging.DEBUG)
+    handler = PidfulStreamHandler()
+    handler.formatter = logging.Formatter(
+        '%(asctime)s %(levelname).1s %(name)s: %(message)s',
+        '%H:%M:%S'
+    )
+    root.handlers.insert(0, handler)
+
+
+# NOTE(review): this lambda is immediately shadowed by the def of the same
+# name below; it is dead code kept verbatim from upstream.
+_profile_hook = lambda name, func, *args: func(*args)
+# Output path template for profile dumps; %(ext)s is filled per file type.
+_profile_fmt = os.environ.get(
+    'MITOGEN_PROFILE_FMT',
+    '/tmp/mitogen.stats.%(pid)s.%(identity)s.%(now)s.%(ext)s',
+)
+
+
+def _profile_hook(name, func, *args):
+    """
+    Call `func(*args)` and return its result. This function is replaced by
+    :func:`_real_profile_hook` when :func:`enable_profiling` is called. This
+    interface is obsolete and will be replaced by a signals-based integration
+    later on.
+    """
+    return func(*args)
+
+
+def _real_profile_hook(name, func, *args):
+    """
+    Run `func(*args)` under :mod:`cProfile`, dumping raw pstats and a sorted
+    text report to paths derived from :data:`_profile_fmt`, and return the
+    function's result.
+    """
+    profiler = cProfile.Profile()
+    profiler.enable()
+    try:
+        return func(*args)
+    finally:
+        path = _profile_fmt % {
+            'now': int(1e6 * now()),
+            'identity': name,
+            'pid': os.getpid(),
+            'ext': '%s'
+        }
+        profiler.dump_stats(path % ('pstats',))
+        profiler.create_stats()
+        fp = open(path % ('log',), 'w')
+        try:
+            stats = pstats.Stats(profiler, stream=fp)
+            stats.sort_stats('cumulative')
+            stats.print_stats()
+        finally:
+            fp.close()
+
+
+def enable_profiling(econtext=None):
+    """Replace the no-op profile hook with :func:`_real_profile_hook`."""
+    global _profile_hook
+    _profile_hook = _real_profile_hook
+
+
def import_module(modname):
    """
    Import and return the module named `modname`. The non-empty `fromlist`
    forces :func:`__import__` to return the leaf module rather than the
    top-level package.
    """
    return __import__(modname, None, None, [''])
+
+
def pipe():
    """
    Return a `(read_fp, write_fp)` pair of unbuffered Python file objects
    wrapping a fresh UNIX pipe from :func:`os.pipe`. Wrapping the raw
    descriptors ties their lifetime to the file objects, so each descriptor
    is closed when its object's last reference is discarded, if it was not
    already closed explicitly.
    """
    read_fd, write_fd = os.pipe()
    read_fp = os.fdopen(read_fd, 'rb', 0)
    write_fp = os.fdopen(write_fd, 'wb', 0)
    return read_fp, write_fp
+
+
def iter_split(buf, delim, func):
    """
    Call `func(chunk)` for each `delim`-delimited chunk of the potentially
    large `buf`, without building intermediate lists or doing quadratic
    string work. Iteration stops early after any call returns :data:`False`.

    :returns:
        `(trailer, cont)`, where `trailer` is the undelimited tail of `buf`
        (or the unprocessed remainder after an early stop), and `cont` is
        :data:`False` if the final `func()` call returned :data:`False`.
    """
    step = len(delim)
    pos = 0
    keep_going = True
    while keep_going:
        idx = buf.find(delim, pos)
        if idx < 0:
            break
        # Only an explicit False return stops iteration; None continues.
        keep_going = func(buf[pos:idx]) is not False
        pos = idx + step
    return buf[pos:], keep_going
+
+
class Py24Pickler(py_pickle.Pickler):
    """
    Exceptions were classic classes until Python 2.5. Sadly for 2.4, cPickle
    offers little control over how a classic instance is pickled. Therefore 2.4
    uses a pure-Python pickler, so CallError can be made to look as it does on
    newer Pythons.

    This mess will go away once proper serialization exists.
    """
    @classmethod
    def dumps(cls, obj, protocol):
        # Mirror the pickle.dumps() interface: serialize into an in-memory
        # buffer and return the raw byte string.
        bio = BytesIO()
        self = cls(bio, protocol=protocol)
        self.dump(obj)
        return bio.getvalue()

    def save_exc_inst(self, obj):
        # Emit CallError via its __reduce__() (func, args) pair so it
        # deserializes identically across Python versions; every other
        # classic instance takes the stock pickler path.
        if isinstance(obj, CallError):
            func, args = obj.__reduce__()
            self.save(func)
            self.save(args)
            self.write(py_pickle.REDUCE)
        else:
            py_pickle.Pickler.save_inst(self, obj)

    if PY24:
        # On 2.4 only, route classic-class instances to save_exc_inst().
        dispatch = py_pickle.Pickler.dispatch.copy()
        dispatch[py_pickle.InstanceType] = save_exc_inst
+
+
if PY3:
    # In 3.x Unpickler is a class exposing find_class as an overridable, but it
    # cannot be overridden without subclassing.
    class _Unpickler(pickle.Unpickler):
        def find_class(self, module, func):
            # Delegate to the find_global attribute assigned by the caller
            # (see Message.unpickle), giving 2.x and 3.x one code path.
            return self.find_global(module, func)
    pickle__dumps = pickle.dumps
elif PY24:
    # On Python 2.4, we must use a pure-Python pickler.
    pickle__dumps = Py24Pickler.dumps
    _Unpickler = pickle.Unpickler
else:
    pickle__dumps = pickle.dumps
    # In 2.x Unpickler is a function exposing a writeable find_global
    # attribute.
    _Unpickler = pickle.Unpickler
+
+
class Message(object):
    """
    Messages are the fundamental unit of communication, comprising fields from
    the :ref:`stream-protocol` header, an optional reference to the receiving
    :class:`mitogen.core.Router` for ingress messages, and helper methods for
    deserialization and generating replies.
    """
    #: Integer target context ID. :class:`Router` delivers messages locally
    #: when their :attr:`dst_id` matches :data:`mitogen.context_id`, otherwise
    #: they are routed up or downstream.
    dst_id = None

    #: Integer source context ID. Used as the target of replies if any are
    #: generated.
    src_id = None

    #: Context ID under whose authority the message is acting. See
    #: :ref:`source-verification`.
    auth_id = None

    #: Integer target handle in the destination context. This is one of the
    #: :ref:`standard-handles`, or a dynamically generated handle used to
    #: receive a one-time reply, such as the return value of a function call.
    handle = None

    #: Integer target handle to direct any reply to this message. Used to
    #: receive a one-time reply, such as the return value of a function call.
    #: :data:`IS_DEAD` has a special meaning when it appears in this field.
    reply_to = None

    #: Raw message data bytes.
    data = b('')

    # Sentinel meaning :meth:`unpickle` has not yet deserialized :attr:`data`.
    _unpickled = object()

    #: The :class:`Router` responsible for routing the message. This is
    #: :data:`None` for locally originated messages.
    router = None

    #: The :class:`Receiver` over which the message was last received. Part of
    #: the :class:`mitogen.select.Select` interface. Defaults to :data:`None`.
    receiver = None

    # Big-endian wire header: magic, dst_id, src_id, auth_id, handle,
    # reply_to, payload length.
    HEADER_FMT = '>hLLLLLL'
    HEADER_LEN = struct.calcsize(HEADER_FMT)
    HEADER_MAGIC = 0x4d49  # 'MI'

    def __init__(self, **kwargs):
        """
        Construct a message from the supplied `kwargs`. :attr:`src_id` and
        :attr:`auth_id` are always set to :data:`mitogen.context_id`.
        """
        self.src_id = mitogen.context_id
        self.auth_id = mitogen.context_id
        vars(self).update(kwargs)
        assert isinstance(self.data, BytesType), 'Message data is not Bytes'

    def pack(self):
        """
        Serialize the header fields followed by the raw payload into wire
        bytes.
        """
        return (
            struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, self.dst_id,
                        self.src_id, self.auth_id, self.handle,
                        self.reply_to or 0, len(self.data))
            + self.data
        )

    def _unpickle_context(self, context_id, name):
        # Curry in self.router so the unpickled Context can route.
        return _unpickle_context(context_id, name, router=self.router)

    def _unpickle_sender(self, context_id, dst_handle):
        # Curry in self.router so the unpickled Sender can route.
        return _unpickle_sender(self.router, context_id, dst_handle)

    def _unpickle_bytes(self, s, encoding):
        # Deserialization counterpart of _codecs.encode: recover the raw
        # byte string via latin-1.
        s, n = LATIN1_CODEC.encode(s)
        return s

    def _find_global(self, module, func):
        """
        Return the class implementing `module_name.class_name` or raise
        `StreamError` if the module is not whitelisted.
        """
        if module == __name__:
            # Both spellings deserialize via _unpickle_call_error().
            if func == '_unpickle_call_error' or func == 'CallError':
                return _unpickle_call_error
            elif func == '_unpickle_sender':
                return self._unpickle_sender
            elif func == '_unpickle_context':
                return self._unpickle_context
            elif func == 'Blob':
                return Blob
            elif func == 'Secret':
                return Secret
            elif func == 'Kwargs':
                return Kwargs
        elif module == '_codecs' and func == 'encode':
            return self._unpickle_bytes
        elif module == '__builtin__' and func == 'bytes':
            return BytesType
        raise StreamError('cannot unpickle %r/%r', module, func)

    @property
    def is_dead(self):
        """
        :data:`True` if :attr:`reply_to` is set to the magic value
        :data:`IS_DEAD`, indicating the sender considers the channel dead. Dead
        messages can be raised in a variety of circumstances, see
        :data:`IS_DEAD` for more information.
        """
        return self.reply_to == IS_DEAD

    @classmethod
    def dead(cls, reason=None, **kwargs):
        """
        Syntax helper to construct a dead message.
        """
        kwargs['data'], _ = encodings.utf_8.encode(reason or u'')
        return cls(reply_to=IS_DEAD, **kwargs)

    @classmethod
    def pickled(cls, obj, **kwargs):
        """
        Construct a pickled message, setting :attr:`data` to the serialization
        of `obj`, and setting remaining fields using `kwargs`.

        :returns:
            The new message.
        """
        self = cls(**kwargs)
        try:
            self.data = pickle__dumps(obj, protocol=2)
        except pickle.PicklingError:
            # Objects that fail to pickle are replaced by a CallError
            # describing the failure, which is always picklable.
            e = sys.exc_info()[1]
            self.data = pickle__dumps(CallError(e), protocol=2)
        return self

    def reply(self, msg, router=None, **kwargs):
        """
        Compose a reply to this message and send it using :attr:`router`, or
        `router` if :attr:`router` is :data:`None`.

        :param msg:
            Either a :class:`Message`, or an object to be serialized in order
            to construct a new message.
        :param router:
            Optional router to use if :attr:`router` is :data:`None`.
        :param kwargs:
            Optional keyword parameters overriding message fields in the reply.
        """
        if not isinstance(msg, Message):
            msg = Message.pickled(msg)
        msg.dst_id = self.src_id
        msg.handle = self.reply_to
        vars(msg).update(kwargs)
        if msg.handle:
            (self.router or router).route(msg)
        else:
            LOG.debug('dropping reply to message with no return address: %r',
                      msg)

    if PY3:
        # 3.x must be told to decode 2.x str pickles as bytes, not text.
        UNPICKLER_KWARGS = {'encoding': 'bytes'}
    else:
        UNPICKLER_KWARGS = {}

    def _throw_dead(self):
        # Raise ChannelError describing why the channel died: the payload if
        # one was sent, otherwise a generic local/remote message.
        if len(self.data):
            raise ChannelError(self.data.decode('utf-8', 'replace'))
        elif self.src_id == mitogen.context_id:
            raise ChannelError(ChannelError.local_msg)
        else:
            raise ChannelError(ChannelError.remote_msg)

    def unpickle(self, throw=True, throw_dead=True):
        """
        Unpickle :attr:`data`, optionally raising any exceptions present.

        :param bool throw:
            If :data:`True`, raise the deserialized object if it is a
            :class:`CallError` rather than returning it.
        :param bool throw_dead:
            If :data:`True`, raise exceptions, otherwise it is the caller's
            responsibility.

        :raises CallError:
            The serialized data contained CallError exception.
        :raises ChannelError:
            The `is_dead` field was set.
        """
        _vv and IOLOG.debug('%r.unpickle()', self)
        if throw_dead and self.is_dead:
            self._throw_dead()

        # Deserialize at most once; cache the result on the instance.
        obj = self._unpickled
        if obj is Message._unpickled:
            fp = BytesIO(self.data)
            unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS)
            unpickler.find_global = self._find_global
            try:
                # Must occur off the broker thread.
                try:
                    obj = unpickler.load()
                except:
                    LOG.error('raw pickle was: %r', self.data)
                    raise
                self._unpickled = obj
            except (TypeError, ValueError):
                e = sys.exc_info()[1]
                raise StreamError('invalid message: %s', e)

        if throw:
            if isinstance(obj, CallError):
                raise obj

        return obj

    def __repr__(self):
        return 'Message(%r, %r, %r, %r, %r, %r..%d)' % (
            self.dst_id, self.src_id, self.auth_id, self.handle,
            self.reply_to, (self.data or '')[:50], len(self.data)
        )
+
+
class Sender(object):
    """
    Senders deliver pickled messages to a fixed handle in another context; a
    Sender is the write-side counterpart of :class:`mitogen.core.Receiver`.

    Senders may be serialized, making them convenient to wire up data flows.
    See :meth:`mitogen.core.Receiver.to_sender` for more information.

    :param mitogen.core.Context context:
        Context to send messages to.
    :param int dst_handle:
        Destination handle to send messages to.
    """
    #: Reason text carried by the dead message produced by :meth:`close`.
    explicit_close_msg = 'Sender was explicitly closed'

    def __init__(self, context, dst_handle):
        self.context = context
        self.dst_handle = dst_handle

    def __repr__(self):
        return 'Sender(%r, %r)' % (self.context, self.dst_handle)

    def __reduce__(self):
        # Serialize as a (context_id, handle) pair; _unpickle_sender()
        # validates and rebuilds it on the receiving side.
        return _unpickle_sender, (self.context.context_id, self.dst_handle)

    def send(self, data):
        """
        Pickle `data` and deliver it to the remote handle.
        """
        _vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100])
        msg = Message.pickled(data, handle=self.dst_handle)
        self.context.send(msg)

    def close(self):
        """
        Send a dead message to the remote end, causing :class:`ChannelError`
        to be raised in any waiting thread.
        """
        _vv and IOLOG.debug('%r.close()', self)
        msg = Message.dead(
            reason=self.explicit_close_msg,
            handle=self.dst_handle
        )
        self.context.send(msg)
+
+
def _unpickle_sender(router, context_id, dst_handle):
    """
    Reconstruct a :class:`Sender` from its serialized `(context_id,
    dst_handle)` form, validating the untrusted values before use. `router`
    is curried in by :meth:`Message._unpickle_sender`.
    """
    if not (isinstance(router, Router) and
            isinstance(context_id, (int, long)) and context_id >= 0 and
            isinstance(dst_handle, (int, long)) and dst_handle > 0):
        raise TypeError('cannot unpickle Sender: bad input or missing router')
    return Sender(Context(router, context_id), dst_handle)
+
+
class Receiver(object):
    """
    Receivers maintain a thread-safe queue of messages sent to a handle of this
    context from another context.

    :param mitogen.core.Router router:
        Router to register the handler on.

    :param int handle:
        If not :data:`None`, an explicit handle to register, otherwise an
        unused handle is chosen.

    :param bool persist:
        If :data:`False`, unregister the handler after one message is received.
        Single-message receivers are intended for RPC-like transactions, such
        as in the case of :meth:`mitogen.parent.Context.call_async`.

    :param mitogen.core.Context respondent:
        Context this receiver is receiving from. If not :data:`None`, arranges
        for the receiver to receive a dead message if messages can no longer be
        routed to the context due to disconnection, and ignores messages that
        did not originate from the respondent context.
    """
    #: If not :data:`None`, a function invoked as `notify(receiver)` after a
    #: message has been received. The function is invoked on :class:`Broker`
    #: thread, therefore it must not block. Used by
    #: :class:`mitogen.select.Select` to efficiently implement waiting on
    #: multiple event sources.
    notify = None

    # NOTE(review): not referenced within this class; presumably consulted by
    # routing/select machinery elsewhere in the module — TODO confirm.
    raise_channelerror = True

    def __init__(self, router, handle=None, persist=True,
                 respondent=None, policy=None, overwrite=False):
        self.router = router
        #: The handle.
        self.handle = handle  # Avoid __repr__ crash in add_handler()
        self._latch = Latch()  # Must exist prior to .add_handler()
        self.handle = router.add_handler(
            fn=self._on_receive,
            handle=handle,
            policy=policy,
            persist=persist,
            respondent=respondent,
            overwrite=overwrite,
        )

    def __repr__(self):
        return 'Receiver(%r, %r)' % (self.router, self.handle)

    def __enter__(self):
        # Context manager support: `with Receiver(...) as recv:`.
        return self

    def __exit__(self, _1, _2, _3):
        # Guarantee the handle is unregistered when the `with` block exits.
        self.close()

    def to_sender(self):
        """
        Return a :class:`Sender` configured to deliver messages to this
        receiver. As senders are serializable, this makes it convenient to pass
        `(context_id, handle)` pairs around::

            def deliver_monthly_report(sender):
                for line in open('monthly_report.txt'):
                    sender.send(line)
                sender.close()

            @mitogen.main()
            def main(router):
                remote = router.ssh(hostname='mainframe')
                recv = mitogen.core.Receiver(router)
                remote.call(deliver_monthly_report, recv.to_sender())
                for msg in recv:
                    print(msg)
        """
        return Sender(self.router.myself(), self.handle)

    def _on_receive(self, msg):
        """
        Callback registered for the handle with :class:`Router`; appends data
        to the internal queue.
        """
        _vv and IOLOG.debug('%r._on_receive(%r)', self, msg)
        self._latch.put(msg)
        if self.notify:
            self.notify(self)

    #: Reason text carried by the ChannelError raised after :meth:`close`.
    closed_msg = 'the Receiver has been closed'

    def close(self):
        """
        Unregister the receiver's handle from its associated router, and cause
        :class:`ChannelError` to be raised in any thread waiting in :meth:`get`
        on this receiver.
        """
        if self.handle:
            self.router.del_handler(self.handle)
            self.handle = None
        self._latch.close()

    def size(self):
        """
        Return the number of items currently buffered.

        As with :class:`Queue.Queue`, `0` may be returned even though a
        subsequent call to :meth:`get` will succeed, since a message may be
        posted at any moment between :meth:`size` and :meth:`get`.

        As with :class:`Queue.Queue`, `>0` may be returned even though a
        subsequent call to :meth:`get` will block, since another waiting thread
        may be woken at any moment between :meth:`size` and :meth:`get`.

        :raises LatchError:
            The underlying latch has already been marked closed.
        """
        return self._latch.size()

    def empty(self):
        """
        Return `size() == 0`.

        .. deprecated:: 0.2.8
           Use :meth:`size` instead.

        :raises LatchError:
            The latch has already been marked closed.
        """
        return self._latch.empty()

    def get(self, timeout=None, block=True, throw_dead=True):
        """
        Sleep waiting for a message to arrive on this receiver.

        :param float timeout:
            If not :data:`None`, specifies a timeout in seconds.

        :param bool throw_dead:
            If :data:`True` (the default), a received dead message raises via
            :meth:`Message._throw_dead` instead of being returned.

        :raises mitogen.core.ChannelError:
            The remote end indicated the channel should be closed,
            communication with it was lost, or :meth:`close` was called in the
            local process.

        :raises mitogen.core.TimeoutError:
            Timeout was reached.

        :returns:
            :class:`Message` that was received.
        """
        _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block)
        try:
            msg = self._latch.get(timeout=timeout, block=block)
        except LatchError:
            # A closed latch means close() ran; translate to the channel-level
            # error callers expect.
            raise ChannelError(self.closed_msg)
        if msg.is_dead and throw_dead:
            msg._throw_dead()
        return msg

    def __iter__(self):
        """
        Yield consecutive :class:`Message` instances delivered to this receiver
        until :class:`ChannelError` is raised.
        """
        while True:
            try:
                msg = self.get()
            except ChannelError:
                return
            yield msg
+
+
class Channel(Sender, Receiver):
    """
    A channel inherits from :class:`mitogen.core.Sender` and
    `mitogen.core.Receiver` to provide bidirectional functionality.

    .. deprecated:: 0.2.0
        This class is incomplete and obsolete, it will be removed in Mitogen
        0.3.

    Channels were an early attempt at syntax sugar. It is always easier to pass
    around unidirectional pairs of senders/receivers, even though the syntax is
    baroque:

    .. literalinclude:: ../examples/ping_pong.py

    Since all handles aren't known until after both ends are constructed, for
    both ends to communicate through a channel, it is necessary for one end to
    retrieve the handle allocated to the other and reconfigure its own channel
    to match. Currently this is a manual task.
    """
    def __init__(self, router, context, dst_handle, handle=None):
        Sender.__init__(self, context, dst_handle)
        Receiver.__init__(self, router, handle)

    def close(self):
        # Close both halves: Receiver.close() wakes any local waiters,
        # Sender.close() sends the dead message notifying the remote end.
        Receiver.close(self)
        Sender.close(self)

    def __repr__(self):
        return 'Channel(%s, %s)' % (
            Sender.__repr__(self),
            Receiver.__repr__(self)
        )
+
+
class Importer(object):
    """
    Import protocol implementation that fetches modules from the parent
    process.

    :param context: Context to communicate via.
    """
    # The Mitogen package is handled specially, since the child context must
    # construct it manually during startup.
    MITOGEN_PKG_CONTENT = [
        'buildah',
        'compat',
        'debug',
        'doas',
        'docker',
        'kubectl',
        'fakessh',
        'fork',
        'jail',
        'lxc',
        'lxd',
        'master',
        'minify',
        'os_fork',
        'parent',
        'select',
        'service',
        'setns',
        'ssh',
        'su',
        'sudo',
        'utils',
    ]

    ALWAYS_BLACKLIST = [
        # 2.x generates needless imports for 'builtins', while 3.x does the
        # same for '__builtin__'. The correct one is built-in, the other always
        # a negative round-trip.
        'builtins',
        '__builtin__',
        'thread',

        # org.python.core imported by copy, pickle, xml.sax; breaks Jython, but
        # very unlikely to trigger a bug report.
        'org',
    ]

    if PY3:
        ALWAYS_BLACKLIST += ['cStringIO']

    def __init__(self, router, context, core_src, whitelist=(), blacklist=()):
        self._log = logging.getLogger('mitogen.importer')
        self._context = context
        # Maps package name -> list of submodule names known to exist in it.
        self._present = {'mitogen': self.MITOGEN_PKG_CONTENT}
        self._lock = threading.Lock()
        self.whitelist = list(whitelist) or ['']
        self.blacklist = list(blacklist) + self.ALWAYS_BLACKLIST

        # Preserve copies of the original server-supplied whitelist/blacklist
        # for later use by children.
        self.master_whitelist = self.whitelist[:]
        self.master_blacklist = self.blacklist[:]

        # Presence of an entry in this map indicates in-flight GET_MODULE.
        self._callbacks = {}
        # Maps fullname -> (fullname, pkg_present, path, compressed_source,
        # related), as seeded below and delivered by LOAD_MODULE messages.
        self._cache = {}
        if core_src:
            self._update_linecache('x/mitogen/core.py', core_src)
            self._cache['mitogen.core'] = (
                'mitogen.core',
                None,
                'x/mitogen/core.py',
                zlib.compress(core_src, 9),
                [],
            )
        self._install_handler(router)

    def _update_linecache(self, path, data):
        """
        The Python 2.4 linecache module, used to fetch source code for
        tracebacks and :func:`inspect.getsource`, does not support PEP-302,
        meaning it needs extra help to for Mitogen-loaded modules. Directly
        populate its cache if a loaded module belongs to the Mitogen package.
        """
        if PY24 and 'mitogen' in path:
            linecache.cache[path] = (
                len(data),
                0.0,
                [line+'\n' for line in data.splitlines()],
                path,
            )

    def _install_handler(self, router):
        # Receive LOAD_MODULE responses; only parents may supply modules.
        router.add_handler(
            fn=self._on_load_module,
            handle=LOAD_MODULE,
            policy=has_parent_authority,
        )

    def __repr__(self):
        return 'Importer'

    def builtin_find_module(self, fullname):
        """
        Raise :class:`ImportError` if `fullname` cannot be found by the local
        :func:`imp.find_module` machinery; return :data:`None` otherwise.
        Used by :meth:`find_module` to decide whether a remote fetch is
        needed.
        """
        # imp.find_module() will always succeed for __main__, because it is a
        # built-in module. That means it exists on a special linked list deep
        # within the bowels of the interpreter. We must special case it.
        if fullname == '__main__':
            raise ModuleNotFoundError()

        parent, _, modname = str_rpartition(fullname, '.')
        if parent:
            path = sys.modules[parent].__path__
        else:
            path = None

        fp, pathname, description = imp.find_module(modname, path)
        if fp:
            fp.close()

    def find_module(self, fullname, path=None):
        """
        PEP-302 finder: return `self` if `fullname` should be fetched from
        the parent context, or :data:`None` to defer to the regular import
        machinery.
        """
        # Guard against reentrant invocation on the same thread.
        if hasattr(_tls, 'running'):
            return None

        _tls.running = True
        try:
            #_v and self._log.debug('Python requested %r', fullname)
            fullname = to_text(fullname)
            pkgname, dot, _ = str_rpartition(fullname, '.')
            pkg = sys.modules.get(pkgname)
            if pkgname and getattr(pkg, '__loader__', None) is not self:
                self._log.debug('%s is submodule of a locally loaded package',
                                fullname)
                return None

            suffix = fullname[len(pkgname+dot):]
            if pkgname and suffix not in self._present.get(pkgname, ()):
                self._log.debug('%s has no submodule %s', pkgname, suffix)
                return None

            # #114: explicitly whitelisted prefixes override any
            # system-installed package.
            if self.whitelist != ['']:
                if any(fullname.startswith(s) for s in self.whitelist):
                    return self

            try:
                self.builtin_find_module(fullname)
                _vv and self._log.debug('%r is available locally', fullname)
            except ImportError:
                _vv and self._log.debug('we will try to load %r', fullname)
                return self
        finally:
            del _tls.running

    blacklisted_msg = (
        '%r is present in the Mitogen importer blacklist, therefore this '
        'context will not attempt to request it from the master, as the '
        'request will always be refused.'
    )
    pkg_resources_msg = (
        'pkg_resources is prohibited from importing __main__, as it causes '
        'problems in applications whose main module is not designed to be '
        're-imported by children.'
    )
    absent_msg = (
        'The Mitogen master process was unable to serve %r. It may be a '
        'native Python extension, or it may be missing entirely. Check the '
        'importer debug logs on the master for more information.'
    )

    def _refuse_imports(self, fullname):
        """
        Raise :class:`ModuleNotFoundError` for blacklisted modules and for
        pkg_resources importing ``__main__``; additionally neuter ``pbr``
        version detection via the environment.
        """
        if is_blacklisted_import(self, fullname):
            raise ModuleNotFoundError(self.blacklisted_msg % (fullname,))

        f = sys._getframe(2)
        requestee = f.f_globals['__name__']

        if fullname == '__main__' and requestee == 'pkg_resources':
            # Anything that imports pkg_resources will eventually cause
            # pkg_resources to try and scan __main__ for its __requires__
            # attribute (pkg_resources/__init__.py::_build_master()). This
            # breaks any app that is not expecting its __main__ to suddenly be
            # sucked over a network and injected into a remote process, like
            # py.test.
            raise ModuleNotFoundError(self.pkg_resources_msg)

        if fullname == 'pbr':
            # It claims to use pkg_resources to read version information, which
            # would result in PEP-302 being used, but it actually does direct
            # filesystem access. So instead smodge the environment to override
            # any version that was defined. This will probably break something
            # later.
            os.environ['PBR_VERSION'] = '0.0.0'

    def _on_load_module(self, msg):
        """
        Handler for LOAD_MODULE messages: store the received module tuple in
        the cache, refresh the 2.4 linecache, then fire any callbacks waiting
        on the module, outside the lock.
        """
        if msg.is_dead:
            return

        tup = msg.unpickle()
        fullname = tup[0]
        _v and self._log.debug('received %s', fullname)

        self._lock.acquire()
        try:
            self._cache[fullname] = tup
            if tup[2] is not None and PY24:
                self._update_linecache(
                    path='master:' + tup[2],
                    data=zlib.decompress(tup[3])
                )
            callbacks = self._callbacks.pop(fullname, [])
        finally:
            self._lock.release()

        # Invoke callbacks after the lock is released.
        for callback in callbacks:
            callback()

    def _request_module(self, fullname, callback):
        """
        Arrange for `callback()` to run once `fullname` is cached, sending a
        GET_MODULE request to the parent unless one is already in flight.
        Invokes `callback` immediately if the module is already cached.
        """
        self._lock.acquire()
        try:
            present = fullname in self._cache
            if not present:
                funcs = self._callbacks.get(fullname)
                if funcs is not None:
                    _v and self._log.debug('existing request for %s in flight',
                                           fullname)
                    funcs.append(callback)
                else:
                    _v and self._log.debug('sending new %s request to parent',
                                           fullname)
                    self._callbacks[fullname] = [callback]
                    self._context.send(
                        Message(data=b(fullname), handle=GET_MODULE)
                    )
        finally:
            self._lock.release()

        if present:
            callback()

    def load_module(self, fullname):
        """
        PEP-302 loader: block until the module's source arrives from the
        parent, then construct, compile and execute the module.

        :raises ModuleNotFoundError:
            The module is refused by :meth:`_refuse_imports`, or the master
            could not serve it.
        """
        fullname = to_text(fullname)
        _v and self._log.debug('requesting %s', fullname)
        self._refuse_imports(fullname)

        event = threading.Event()
        self._request_module(fullname, event.set)
        event.wait()

        ret = self._cache[fullname]
        if ret[2] is None:
            raise ModuleNotFoundError(self.absent_msg % (fullname,))

        pkg_present = ret[1]
        mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
        mod.__file__ = self.get_filename(fullname)
        mod.__loader__ = self
        if pkg_present is not None:  # it's a package.
            mod.__path__ = []
            mod.__package__ = fullname
            self._present[fullname] = pkg_present
        else:
            mod.__package__ = str_rpartition(fullname, '.')[0] or None

        if mod.__package__ and not PY3:
            # 2.x requires __package__ to be exactly a string.
            mod.__package__, _ = encodings.utf_8.encode(mod.__package__)

        source = self.get_source(fullname)
        try:
            code = compile(source, mod.__file__, 'exec', 0, 1)
        except SyntaxError:
            LOG.exception('while importing %r', fullname)
            raise

        if PY3:
            exec(code, vars(mod))
        else:
            # String form keeps the 2.x-only statement out of 3.x's parser.
            exec('exec code in vars(mod)')

        # #590: if a module replaces itself in sys.modules during import, below
        # is necessary. This matches PyImport_ExecCodeModuleEx()
        return sys.modules.get(fullname, mod)

    def get_filename(self, fullname):
        """
        PEP-302: return the synthetic 'master:'-prefixed filename for a
        cached module, or :data:`None` if the module is unknown.
        """
        if fullname in self._cache:
            path = self._cache[fullname][2]
            if path is None:
                # If find_loader() returns self but a subsequent master RPC
                # reveals the module can't be loaded, and so load_module()
                # throws ImportError, on Python 3.x it is still possible for
                # the loader to be called to fetch metadata.
                raise ModuleNotFoundError(self.absent_msg % (fullname,))
            return u'master:' + self._cache[fullname][2]

    def get_source(self, fullname):
        """
        PEP-302: return the decompressed source for a cached module, or
        :data:`None` if the module is unknown.
        """
        if fullname in self._cache:
            compressed = self._cache[fullname][3]
            if compressed is None:
                raise ModuleNotFoundError(self.absent_msg % (fullname,))

            source = zlib.decompress(self._cache[fullname][3])
            if PY3:
                return to_text(source)
            return source
+
+
class LogHandler(logging.Handler):
    """
    A :class:`logging.Handler` subclass that arranges for :data:`FORWARD_LOG`
    messages to be sent to a parent context in response to logging messages
    generated by the current context. This is installed by default in child
    contexts during bootstrap, so that :mod:`logging` events can be viewed and
    managed centrally in the master process.

    The handler is initially *corked* after construction, such that it buffers
    messages until :meth:`uncork` is called. This allows logging to be
    installed prior to communication with the target being available, and
    avoids any possible race where early log messages might be dropped.

    :param mitogen.core.Context context:
        The context to send log messages towards. At present this is always
        the master process.
    """
    def __init__(self, context):
        logging.Handler.__init__(self)
        self.context = context
        # Per-thread state carrying the in_emit reentrancy flag.
        self.local = threading.local()
        self._buffer = []
        # Private synchronization is needed while corked, to ensure no
        # concurrent call to _send() exists during uncork().
        self._buffer_lock = threading.Lock()

    def uncork(self):
        """
        #305: during startup :class:`LogHandler` may be installed before it is
        possible to route messages, therefore messages are buffered until
        :meth:`uncork` is called by :class:`ExternalContext`.
        """
        self._buffer_lock.acquire()
        try:
            # From here on, the instance attribute `_send` shadows the class
            # method below, so emit() routes straight to context.send().
            self._send = self.context.send
            for msg in self._buffer:
                self._send(msg)
            # None marks the corked phase as over.
            self._buffer = None
        finally:
            self._buffer_lock.release()

    def _send(self, msg):
        self._buffer_lock.acquire()
        try:
            if self._buffer is None:
                # uncork() may run concurrent to _send(): if it won the race,
                # self._send now resolves to the rebound instance attribute
                # (context.send), so this is not infinite recursion.
                self._send(msg)
            else:
                self._buffer.append(msg)
        finally:
            self._buffer_lock.release()

    def emit(self, rec):
        """
        Send a :data:`FORWARD_LOG` message towards the target context.
        """
        # Skip mitogen.io records and reentrant calls, preventing forwarding
        # from generating further log traffic about itself.
        if rec.name == 'mitogen.io' or \
           getattr(self.local, 'in_emit', False):
            return

        self.local.in_emit = True
        try:
            msg = self.format(rec)
            # Wire format: NUL-separated name, numeric level, formatted text.
            encoded = '%s\x00%s\x00%s' % (rec.name, rec.levelno, msg)
            if isinstance(encoded, UnicodeType):
                # Logging package emits both :(
                encoded = encoded.encode('utf-8')
            self._send(Message(data=encoded, handle=FORWARD_LOG))
        finally:
            self.local.in_emit = False
+
+
+class Stream(object):
+    """
+    A :class:`Stream` is one readable and optionally one writeable file
+    descriptor (represented by :class:`Side`) aggregated alongside an
+    associated :class:`Protocol` that knows how to respond to IO readiness
+    events for those descriptors.
+
+    Streams are registered with :class:`Broker`, and callbacks are invoked on
+    the broker thread in response to IO activity. When registered using
+    :meth:`Broker.start_receive` or :meth:`Broker._start_transmit`, the broker
+    may call any of :meth:`on_receive`, :meth:`on_transmit`,
+    :meth:`on_shutdown` or :meth:`on_disconnect`.
+
+    It is expected that the :class:`Protocol` associated with a stream will
+    change over its life. For example during connection setup, the initial
+    protocol may be :class:`mitogen.parent.BootstrapProtocol` that knows how to
+    enter SSH and sudo passwords and transmit the :mod:`mitogen.core` source to
+    the target, before handing off to :class:`MitogenProtocol` when the target
+    process is initialized.
+
+    Streams connecting to children are in turn aggregated by
+    :class:`mitogen.parent.Connection`, which contains additional logic for
+    managing any child process, and a reference to any separate ``stderr``
+    :class:`Stream` connected to that process.
+    """
+    #: A :class:`Side` representing the stream's receive file descriptor.
+    receive_side = None
+
+    #: A :class:`Side` representing the stream's transmit file descriptor.
+    transmit_side = None
+
+    #: A :class:`Protocol` representing the protocol active on the stream.
+    protocol = None
+
+    #: In parents, the :class:`mitogen.parent.Connection` instance.
+    conn = None
+
+    #: The stream name. This is used in the :meth:`__repr__` output in any log
+    #: messages, it may be any descriptive string.
+    name = u'default'
+
+    def set_protocol(self, protocol):
+        """
+        Bind a :class:`Protocol` to this stream, by updating
+        :attr:`Protocol.stream` to refer to this stream, and updating this
+        stream's :attr:`Stream.protocol` to the refer to the protocol. Any
+        prior protocol's :attr:`Protocol.stream` is set to :data:`None`.
+        """
+        if self.protocol:
+            self.protocol.stream = None
+        self.protocol = protocol
+        self.protocol.stream = self
+
+    def accept(self, rfp, wfp):
+        """
+        Attach a pair of file objects to :attr:`receive_side` and
+        :attr:`transmit_side`, after wrapping them in :class:`Side` instances.
+        :class:`Side` will call :func:`set_nonblock` and :func:`set_cloexec`
+        on the underlying file descriptors during construction.
+
+        The same file object may be used for both sides. The default
+        :meth:`on_disconnect` handles the possibility that only one
+        descriptor may need to be closed.
+
+        :param file rfp:
+            The file object to receive from.
+        :param file wfp:
+            The file object to transmit to.
+        """
+        self.receive_side = Side(self, rfp)
+        self.transmit_side = Side(self, wfp)
+
+    def __repr__(self):
+        return "<Stream %s #%04x>" % (self.name, id(self) & 0xffff,)
+
+    def on_receive(self, broker):
+        """
+        Invoked by :class:`Broker` when the stream's :attr:`receive_side` has
+        been marked readable using :meth:`Broker.start_receive` and the broker
+        has detected the associated file descriptor is ready for reading.
+
+        Subclasses must implement this if they are registered using
+        :meth:`Broker.start_receive`, and the method must invoke
+        :meth:`on_disconnect` if reading produces an empty string.
+
+        The default implementation reads :attr:`Protocol.read_size` bytes and
+        passes the resulting bytestring to :meth:`Protocol.on_receive`. If the
+        bytestring is 0 bytes, invokes :meth:`on_disconnect` instead.
+        """
+        buf = self.receive_side.read(self.protocol.read_size)
+        if not buf:
+            LOG.debug('%r: empty read, disconnecting', self.receive_side)
+            return self.on_disconnect(broker)
+
+        self.protocol.on_receive(broker, buf)
+
+    def on_transmit(self, broker):
+        """
+        Invoked by :class:`Broker` when the stream's :attr:`transmit_side` has
+        been marked writeable using :meth:`Broker._start_transmit` and the
+        broker has detected the associated file descriptor is ready for
+        writing.
+
+        Subclasses must implement this if they are ever registered with
+        :meth:`Broker._start_transmit`.
+
+        The default implementation invokes :meth:`Protocol.on_transmit`.
+        """
+        self.protocol.on_transmit(broker)
+
+    def on_shutdown(self, broker):
+        """
+        Invoked by :meth:`Broker.shutdown` to allow the stream time to
+        gracefully shutdown.
+
+        The default implementation emits a ``shutdown`` signal before
+        invoking :meth:`on_disconnect`.
+        """
+        fire(self, 'shutdown')
+        self.protocol.on_shutdown(broker)
+
+    def on_disconnect(self, broker):
+        """
+        Invoked by :class:`Broker` to force disconnect the stream during
+        shutdown, invoked by the default :meth:`on_shutdown` implementation,
+        and usually invoked by any subclass :meth:`on_receive` implementation
+        in response to a 0-byte read.
+
+        The base implementation fires a ``disconnect`` event, then closes
+        :attr:`receive_side` and :attr:`transmit_side` after unregistering the
+        stream from the broker.
+        """
+        fire(self, 'disconnect')
+        self.protocol.on_disconnect(broker)
+
+
+class Protocol(object):
+    """
+    Implement the program behaviour associated with activity on a
+    :class:`Stream`. The protocol in use may vary over a stream's life, for
+    example to allow :class:`mitogen.parent.BootstrapProtocol` to initialize
+    the connected child before handing it off to :class:`MitogenProtocol`. A
+    stream's active protocol is tracked in the :attr:`Stream.protocol`
+    attribute, and modified via :meth:`Stream.set_protocol`.
+
+    Protocols do not handle IO, they are entirely reliant on the interface
+    provided by :class:`Stream` and :class:`Side`, allowing the underlying IO
+    implementation to be replaced without modifying behavioural logic.
+    """
+    stream_class = Stream
+
+    #: The :class:`Stream` this protocol is currently bound to, or
+    #: :data:`None`.
+    stream = None
+
+    #: The size of the read buffer used by :class:`Stream` when this is the
+    #: active protocol for the stream.
+    read_size = CHUNK_SIZE
+
+    @classmethod
+    def build_stream(cls, *args, **kwargs):
+        """
+        Return a new :attr:`stream_class` instance with an instance of this
+        protocol (constructed with `args` and `kwargs`) bound to it via
+        :meth:`Stream.set_protocol`.
+        """
+        stream = cls.stream_class()
+        stream.set_protocol(cls(*args, **kwargs))
+        return stream
+
+    def __repr__(self):
+        return '%s(%s)' % (
+            self.__class__.__name__,
+            self.stream and self.stream.name,
+        )
+
+    def on_shutdown(self, broker):
+        """
+        Base implementation: disconnect immediately on shutdown.
+        """
+        _v and LOG.debug('%r: shutting down', self)
+        self.stream.on_disconnect(broker)
+
+    def on_disconnect(self, broker):
+        # Normally both sides share an FD, so it is important that
+        # transmit_side is deregistered from Poller before closing the
+        # receive side, as pollers like epoll and kqueue unregister all
+        # events on FD close, causing a subsequent attempt to unregister the
+        # transmit side to fail.
+        LOG.debug('%r: disconnecting', self)
+        broker.stop_receive(self.stream)
+        if self.stream.transmit_side:
+            broker._stop_transmit(self.stream)
+
+        self.stream.receive_side.close()
+        if self.stream.transmit_side:
+            self.stream.transmit_side.close()
+
+
+class DelimitedProtocol(Protocol):
+    """
+    Provide a :meth:`Protocol.on_receive` implementation for protocols that are
+    delimited by a fixed string, like text based protocols. Each message is
+    passed to :meth:`on_line_received` as it arrives, with incomplete messages
+    passed to :meth:`on_partial_line_received`.
+
+    When emulating user input it is often necessary to respond to incomplete
+    lines, such as when a "Password: " prompt is sent.
+    :meth:`on_partial_line_received` may be called repeatedly with an
+    increasingly complete message. When a complete message is finally received,
+    :meth:`on_line_received` will be called once for it before the buffer is
+    discarded.
+
+    If :func:`on_line_received` returns :data:`False`, remaining data is passed
+    unprocessed to the stream's current protocol's :meth:`on_receive`. This
+    allows switching from line-oriented to binary while the input buffer
+    contains both kinds of data.
+    """
+    #: The delimiter. Defaults to newline.
+    delimiter = b('\n')
+    #: Unterminated data carried over between on_receive() calls.
+    _trailer = b('')
+
+    def on_receive(self, broker, buf):
+        """
+        Split buffered input on :attr:`delimiter`, passing each complete line
+        to :meth:`on_line_received`, and any unterminated remainder either to
+        :meth:`on_partial_line_received`, or to a successor protocol's
+        :meth:`on_receive` when a callback switched protocols mid-buffer.
+        """
+        _vv and IOLOG.debug('%r.on_receive()', self)
+        # Capture the stream up front: on_line_received() may invoke
+        # Stream.set_protocol(), replacing self as the active protocol (see
+        # the assertion below).
+        stream = self.stream
+        self._trailer, cont = mitogen.core.iter_split(
+            buf=self._trailer + buf,
+            delim=self.delimiter,
+            func=self.on_line_received,
+        )
+
+        if self._trailer:
+            if cont:
+                self.on_partial_line_received(self._trailer)
+            else:
+                assert stream.protocol is not self, \
+                    'stream protocol is no longer %r' % (self,)
+                stream.protocol.on_receive(broker, self._trailer)
+
+    def on_line_received(self, line):
+        """
+        Receive a line from the stream.
+
+        :param bytes line:
+            The encoded line, excluding the delimiter.
+        :returns:
+            :data:`False` to indicate this invocation modified the stream's
+            active protocol, and any remaining buffered data should be passed
+            to the new protocol's :meth:`on_receive` method.
+
+            Any other return value is ignored.
+        """
+        pass
+
+    def on_partial_line_received(self, line):
+        """
+        Receive a trailing unterminated partial line from the stream.
+
+        :param bytes line:
+            The encoded partial line.
+        """
+        pass
+
+
+class BufferedWriter(object):
+    """
+    Implement buffered output while avoiding quadratic string operations. This
+    is currently constructed by each protocol, in future it may become fixed
+    for each stream instead.
+    """
+    def __init__(self, broker, protocol):
+        self._broker = broker
+        self._protocol = protocol
+        # Deque of pending byte chunks; appended by write(), drained from the
+        # left by on_transmit().
+        self._buf = collections.deque()
+        # Total number of buffered bytes across all chunks.
+        self._len = 0
+
+    def write(self, s):
+        """
+        Transmit `s` immediately, falling back to enqueuing it and marking the
+        stream writeable if no OS buffer space is available.
+        """
+        if not self._len:
+            # Modifying epoll/Kqueue state is expensive, as are needless broker
+            # loops. Rather than wait for writeability, just write immediately,
+            # and fall back to the broker loop on error or full buffer.
+            try:
+                n = self._protocol.stream.transmit_side.write(s)
+                # n is None when Side.write() detected disconnection; buffer
+                # the data and let the broker loop discover it.
+                if n:
+                    if n == len(s):
+                        return
+                    s = s[n:]
+            except OSError:
+                # Write failed (e.g. would block); fall through to buffering.
+                pass
+
+            self._broker._start_transmit(self._protocol.stream)
+        self._buf.append(s)
+        self._len += len(s)
+
+    def on_transmit(self, broker):
+        """
+        Respond to stream writeability by retrying previously buffered
+        :meth:`write` calls.
+        """
+        if self._buf:
+            buf = self._buf.popleft()
+            written = self._protocol.stream.transmit_side.write(buf)
+            if not written:
+                _v and LOG.debug('disconnected during write to %r', self)
+                self._protocol.stream.on_disconnect(broker)
+                return
+            elif written != len(buf):
+                # Partial write: requeue the unsent remainder. NOTE(review):
+                # BufferType is defined elsewhere in this module; presumably
+                # a zero-copy view of `buf` starting at `written` — confirm.
+                self._buf.appendleft(BufferType(buf, written))
+
+            _vv and IOLOG.debug('transmitted %d bytes to %r', written, self)
+            self._len -= written
+
+        if not self._buf:
+            broker._stop_transmit(self._protocol.stream)
+
+
+class Side(object):
+    """
+    Represent one side of a :class:`Stream`. This allows unidirectional (e.g.
+    pipe) and bidirectional (e.g. socket) streams to operate identically.
+
+    Sides are also responsible for tracking the open/closed state of the
+    underlying FD, preventing erroneous duplicate calls to :func:`os.close` due
+    to duplicate :meth:`Stream.on_disconnect` calls, which would otherwise risk
+    silently succeeding by closing an unrelated descriptor. For this reason, it
+    is crucial only one file object exists per unique descriptor.
+
+    :param mitogen.core.Stream stream:
+        The stream this side is associated with.
+    :param object fp:
+        The file or socket object managing the underlying file descriptor. Any
+        object may be used that supports `fileno()` and `close()` methods.
+    :param bool cloexec:
+        If :data:`True`, the descriptor has its :data:`fcntl.FD_CLOEXEC` flag
+        enabled using :func:`fcntl.fcntl`.
+    :param bool keep_alive:
+        If :data:`True`, the continued existence of this side will extend the
+        shutdown grace period until it has been unregistered from the broker.
+    :param bool blocking:
+        If :data:`False`, the descriptor has its :data:`os.O_NONBLOCK` flag
+        enabled using :func:`fcntl.fcntl`.
+    """
+    #: Weak registry of every live Side, drained by _on_fork().
+    _fork_refs = weakref.WeakValueDictionary()
+    #: Set to True by the first close() call; later calls are no-ops.
+    closed = False
+
+    def __init__(self, stream, fp, cloexec=True, keep_alive=True, blocking=False):
+        #: The :class:`Stream` for which this is a read or write side.
+        self.stream = stream
+        # File or socket object responsible for the lifetime of its underlying
+        # file descriptor.
+        self.fp = fp
+        #: Integer file descriptor to perform IO on, or :data:`None` if
+        #: :meth:`close` has been called. This is saved separately from the
+        #: file object, since :meth:`file.fileno` cannot be called on it after
+        #: it has been closed.
+        self.fd = fp.fileno()
+        #: If :data:`True`, causes presence of this side in
+        #: :class:`Broker`'s active reader set to defer shutdown until the
+        #: side is disconnected.
+        self.keep_alive = keep_alive
+        self._fork_refs[id(self)] = self
+        if cloexec:
+            set_cloexec(self.fd)
+        if not blocking:
+            set_nonblock(self.fd)
+
+    def __repr__(self):
+        return '<Side of %s fd %s>' % (
+            self.stream.name or repr(self.stream),
+            self.fd
+        )
+
+    @classmethod
+    def _on_fork(cls):
+        """
+        Close every tracked :class:`Side`, emptying :attr:`_fork_refs`.
+        NOTE(review): the caller is not visible in this chunk; presumably
+        invoked in the child after :func:`os.fork` so descriptors are not
+        shared with the parent — confirm against the fork handling code.
+        """
+        while cls._fork_refs:
+            _, side = cls._fork_refs.popitem()
+            _vv and IOLOG.debug('Side._on_fork() closing %r', side)
+            side.close()
+
+    def close(self):
+        """
+        Call :meth:`close() <file.close>` on :attr:`fp` the first time this
+        method is invoked; the :attr:`closed` flag makes subsequent calls
+        no-ops, so a descriptor number reused elsewhere is never closed by
+        mistake.
+        """
+        _vv and IOLOG.debug('%r.close()', self)
+        if not self.closed:
+            self.closed = True
+            self.fp.close()
+
+    def read(self, n=CHUNK_SIZE):
+        """
+        Read up to `n` bytes from the file descriptor, wrapping the underlying
+        :func:`os.read` call with :func:`io_op` to trap common disconnection
+        conditions.
+
+        :meth:`read` always behaves as if it is reading from a regular UNIX
+        file; socket, pipe, and TTY disconnection errors are masked and result
+        in a 0-sized read like a regular file.
+
+        :returns:
+            Bytes read, or the empty string to indicate disconnection was
+            detected.
+        """
+        if self.closed:
+            # Refuse to touch the handle after closed, it may have been reused
+            # by another thread. TODO: synchronize read()/write()/close().
+            return b('')
+        s, disconnected = io_op(os.read, self.fd, n)
+        if disconnected:
+            LOG.debug('%r: disconnected during read: %s', self, disconnected)
+            return b('')
+        return s
+
+    def write(self, s):
+        """
+        Write as much of the bytes from `s` as possible to the file descriptor,
+        wrapping the underlying :func:`os.write` call with :func:`io_op` to
+        trap common disconnection conditions.
+
+        :returns:
+            Number of bytes written, or :data:`None` if disconnection was
+            detected.
+        """
+        if self.closed:
+            # Don't touch the handle after close, it may be reused elsewhere.
+            return None
+
+        written, disconnected = io_op(os.write, self.fd, s)
+        if disconnected:
+            LOG.debug('%r: disconnected during write: %s', self, disconnected)
+            return None
+        return written
+
+
+class MitogenProtocol(Protocol):
+    """
+    :class:`Protocol` implementing mitogen's :ref:`stream protocol
+    <stream-protocol>`.
+    """
+    #: If not :data:`False`, indicates the stream has :attr:`auth_id` set and
+    #: its value is the same as :data:`mitogen.context_id` or appears in
+    #: :data:`mitogen.parent_ids`.
+    is_privileged = False
+
+    #: Invoked as `on_message(stream, msg)` each message received from the
+    #: peer.
+    on_message = None
+
+    def __init__(self, router, remote_id, auth_id=None,
+                 local_id=None, parent_ids=None):
+        """
+        :param Router router:
+            Router this stream routes messages through.
+        :param int remote_id:
+            Context ID of the peer at the far end of the stream.
+        :param int auth_id:
+            If not :data:`None`, stamped into :attr:`Message.auth_id` of
+            every message received on this stream.
+        :param int local_id:
+            Defaults to :data:`mitogen.context_id`.
+        :param list parent_ids:
+            Defaults to :data:`mitogen.parent_ids`.
+        """
+        self._router = router
+        self.remote_id = remote_id
+        #: If not :data:`None`, :class:`Router` stamps this into
+        #: :attr:`Message.auth_id` of every message received on this stream.
+        self.auth_id = auth_id
+
+        if parent_ids is None:
+            parent_ids = mitogen.parent_ids
+        if local_id is None:
+            local_id = mitogen.context_id
+
+        self.is_privileged = (
+            (remote_id in parent_ids) or
+            auth_id in ([local_id] + parent_ids)
+        )
+        # Modules the peer is already known to possess; mitogen.core is
+        # always bootstrapped into a child.
+        self.sent_modules = set(['mitogen', 'mitogen.core'])
+        # Deque of received byte chunks awaiting reassembly by _receive_one().
+        self._input_buf = collections.deque()
+        # Total bytes across all chunks in _input_buf.
+        self._input_buf_len = 0
+        self._writer = BufferedWriter(router.broker, self)
+
+        #: Routing records the dst_id of every message arriving from this
+        #: stream. Any arriving DEL_ROUTE is rebroadcast for any such ID.
+        self.egress_ids = set()
+
+    def on_receive(self, broker, buf):
+        """
+        Handle the next complete message on the stream. Raise
+        :class:`StreamError` on failure.
+        """
+        _vv and IOLOG.debug('%r.on_receive()', self)
+        # While little data is buffered, coalesce into the first chunk so
+        # header parsing in _receive_one() sees one contiguous string.
+        if self._input_buf and self._input_buf_len < 128:
+            self._input_buf[0] += buf
+        else:
+            self._input_buf.append(buf)
+
+        self._input_buf_len += len(buf)
+        while self._receive_one(broker):
+            pass
+
+    corrupt_msg = (
+        '%s: Corruption detected: frame signature incorrect. This likely means'
+        ' some external process is interfering with the connection. Received:'
+        '\n\n'
+        '%r'
+    )
+
+    def _receive_one(self, broker):
+        """
+        Attempt to decode and route one message from the input buffer.
+        Return :data:`True` if a message was consumed and another attempt
+        should be made, :data:`False` if more data is needed or the stream
+        was disconnected.
+        """
+        if self._input_buf_len < Message.HEADER_LEN:
+            return False
+
+        msg = Message()
+        msg.router = self._router
+        (magic, msg.dst_id, msg.src_id, msg.auth_id,
+         msg.handle, msg.reply_to, msg_len) = struct.unpack(
+            Message.HEADER_FMT,
+            self._input_buf[0][:Message.HEADER_LEN],
+        )
+
+        if magic != Message.HEADER_MAGIC:
+            LOG.error(self.corrupt_msg, self.stream.name, self._input_buf[0][:2048])
+            self.stream.on_disconnect(broker)
+            return False
+
+        if msg_len > self._router.max_message_size:
+            LOG.error('%r: Maximum message size exceeded (got %d, max %d)',
+                      self, msg_len, self._router.max_message_size)
+            self.stream.on_disconnect(broker)
+            return False
+
+        total_len = msg_len + Message.HEADER_LEN
+        if self._input_buf_len < total_len:
+            _vv and IOLOG.debug(
+                '%r: Input too short (want %d, got %d)',
+                self, msg_len, self._input_buf_len - Message.HEADER_LEN
+            )
+            return False
+
+        # Reassemble the body across buffered chunks. The first chunk is
+        # consumed from just past the header (start=HEADER_LEN); subsequent
+        # chunks from offset 0. `remain` counts down the header+body bytes
+        # consumed so far.
+        start = Message.HEADER_LEN
+        prev_start = start
+        remain = total_len
+        bits = []
+        while remain:
+            buf = self._input_buf.popleft()
+            bit = buf[start:remain]
+            bits.append(bit)
+            remain -= len(bit) + start
+            prev_start = start
+            start = 0
+
+        msg.data = b('').join(bits)
+        # Push the unread tail of the final chunk back onto the buffer.
+        self._input_buf.appendleft(buf[prev_start+len(bit):])
+        self._input_buf_len -= total_len
+        self._router._async_route(msg, self.stream)
+        return True
+
+    def pending_bytes(self):
+        """
+        Return the number of bytes queued for transmission on this stream. This
+        can be used to limit the amount of data buffered in RAM by an otherwise
+        unlimited consumer.
+
+        For an accurate result, this method should be called from the Broker
+        thread, for example by using :meth:`Broker.defer_sync`.
+        """
+        return self._writer._len
+
+    def on_transmit(self, broker):
+        """
+        Transmit buffered messages.
+        """
+        _vv and IOLOG.debug('%r.on_transmit()', self)
+        self._writer.on_transmit(broker)
+
+    def _send(self, msg):
+        """
+        Pack and enqueue `msg` for transmission. Must run on the broker
+        thread; external callers use :meth:`send`.
+        """
+        _vv and IOLOG.debug('%r._send(%r)', self, msg)
+        self._writer.write(msg.pack())
+
+    def send(self, msg):
+        """
+        Enqueue `msg` for transmission on this stream, deferring the actual
+        write to the broker thread. May be called from any thread.
+        """
+        self._router.broker.defer(self._send, msg)
+
+    def on_shutdown(self, broker):
+        """
+        Disable :class:`Protocol` immediate disconnect behaviour.
+        """
+        _v and LOG.debug('%r: shutting down', self)
+
+
+class Context(object):
+    """
+    Represent a remote context regardless of the underlying connection method.
+    Context objects are simple facades that emit messages through an
+    associated router, and have :ref:`signals` raised against them in response
+    to various events relating to the context.
+
+    **Note:** This is the somewhat limited core version, used by child
+    contexts. The master subclass is documented below this one.
+
+    Contexts maintain no internal state and are thread-safe.
+
+    Prefer :meth:`Router.context_by_id` over constructing context objects
+    explicitly, as that method is deduplicating, and returns the only context
+    instance :ref:`signals` will be raised on.
+
+    :param mitogen.core.Router router:
+        Router to emit messages through.
+    :param int context_id:
+        Context ID.
+    :param str name:
+        Context name.
+    """
+    name = None
+    remote_name = None
+
+    def __init__(self, router, context_id, name=None):
+        self.router = router
+        self.context_id = context_id
+        if name:
+            self.name = to_text(name)
+
+    def __reduce__(self):
+        # Pickle as a call to _unpickle_context(), which validates its input.
+        return _unpickle_context, (self.context_id, self.name)
+
+    def on_disconnect(self):
+        _v and LOG.debug('%r: disconnecting', self)
+        fire(self, 'disconnect')
+
+    def send_async(self, msg, persist=False):
+        """
+        Arrange for `msg` to be delivered to this context, with replies
+        directed to a newly constructed receiver. :attr:`dst_id
+        <Message.dst_id>` is set to the target context ID, and :attr:`reply_to
+        <Message.reply_to>` is set to the newly constructed receiver's handle.
+
+        :param bool persist:
+            If :data:`False`, the handler will be unregistered after a single
+            message has been received.
+
+        :param mitogen.core.Message msg:
+            The message.
+
+        :returns:
+            :class:`Receiver` configured to receive any replies sent to the
+            message's `reply_to` handle.
+        """
+        receiver = Receiver(self.router, persist=persist, respondent=self)
+        msg.dst_id = self.context_id
+        msg.reply_to = receiver.handle
+
+        _v and LOG.debug('sending message to %r: %r', self, msg)
+        self.send(msg)
+        return receiver
+
+    def call_service_async(self, service_name, method_name, **kwargs):
+        """
+        Send a ``CALL_SERVICE`` message invoking `method_name` of the service
+        named `service_name` in this context, returning a :class:`Receiver`
+        for the reply. `service_name` may be a string, or a class exposing a
+        ``name()`` method.
+        """
+        if isinstance(service_name, BytesType):
+            # NOTE(review): bytes.encode('utf-8') raises on Python 3 (bytes
+            # has no encode method) — this branch looks like it intended
+            # decode(). Preserved as-is; confirm against upstream mitogen.
+            service_name = service_name.encode('utf-8')
+        elif not isinstance(service_name, UnicodeType):
+            service_name = service_name.name()  # Service.name()
+        _v and LOG.debug('calling service %s.%s of %r, args: %r',
+                         service_name, method_name, self, kwargs)
+        tup = (service_name, to_text(method_name), Kwargs(kwargs))
+        msg = Message.pickled(tup, handle=CALL_SERVICE)
+        return self.send_async(msg)
+
+    def send(self, msg):
+        """
+        Arrange for `msg` to be delivered to this context. :attr:`dst_id
+        <Message.dst_id>` is set to the target context ID.
+
+        :param Message msg:
+            Message.
+        """
+        msg.dst_id = self.context_id
+        self.router.route(msg)
+
+    def call_service(self, service_name, method_name, **kwargs):
+        """
+        Synchronous wrapper around :meth:`call_service_async`: block for the
+        reply and return its unpickled value.
+        """
+        recv = self.call_service_async(service_name, method_name, **kwargs)
+        return recv.get().unpickle()
+
+    def send_await(self, msg, deadline=None):
+        """
+        Like :meth:`send_async`, but expect a single reply (`persist=False`)
+        delivered within `deadline` seconds.
+
+        :param mitogen.core.Message msg:
+            The message.
+        :param float deadline:
+            If not :data:`None`, seconds before timing out waiting for a reply.
+        :returns:
+            Deserialized reply.
+        :raises TimeoutError:
+            No message was received and `deadline` passed.
+        """
+        receiver = self.send_async(msg)
+        response = receiver.get(deadline)
+        data = response.unpickle()
+        _vv and IOLOG.debug('%r._send_await() -> %r', self, data)
+        return data
+
+    def __repr__(self):
+        return 'Context(%s, %r)' % (self.context_id, self.name)
+
+
+def _unpickle_context(context_id, name, router=None):
+    """
+    Reconstruct a :class:`Context` during unpickling. Because the input
+    arrives from an untrusted peer, it is validated first: `context_id` must
+    be a non-negative integer and `name` either :data:`None` or a short
+    unicode string, otherwise :class:`TypeError` is raised.
+
+    When `router` is a :class:`Router`, its deduplicating
+    :meth:`Router.context_by_id` is used; otherwise a detached context with
+    no router is returned.
+    """
+    if not (isinstance(context_id, (int, long)) and context_id >= 0 and (
+        (name is None) or
+        (isinstance(name, UnicodeType) and len(name) < 100))
+    ):
+        raise TypeError('cannot unpickle Context: bad input')
+
+    if isinstance(router, Router):
+        return router.context_by_id(context_id, name=name)
+    return Context(None, context_id, name)  # For plain Jane pickle.
+
+
+class Poller(object):
+    """
+    A poller manages OS file descriptors the user is waiting to become
+    available for IO. The :meth:`poll` method blocks the calling thread
+    until one or more become ready. The default implementation is based on
+    :func:`select.poll`.
+
+    Each descriptor has an associated `data` element, which is unique for each
+    readiness type, and defaults to being the same as the file descriptor. The
+    :meth:`poll` method yields the data associated with a descriptor, rather
+    than the descriptor itself, allowing concise loops like::
+
+        p = Poller()
+        p.start_receive(conn.fd, data=conn.on_read)
+        p.start_transmit(conn.fd, data=conn.on_write)
+
+        for callback in p.poll():
+            callback()  # invoke appropriate bound instance method
+
+    Pollers may be modified while :meth:`poll` is yielding results. Removals
+    are processed immediately, causing pending events for the descriptor to be
+    discarded.
+
+    The :meth:`close` method must be called when a poller is discarded to avoid
+    a resource leak.
+
+    Pollers may only be used by one thread at a time.
+    """
+    SUPPORTED = True
+
+    # This changed from select() to poll() in Mitogen 0.2.4. Since poll() has
+    # no upper FD limit, it is suitable for use with Latch, which must handle
+    # FDs larger than select's limit during many-host runs. We want this
+    # because poll() requires no setup and teardown: just a single system call,
+    # which is important because Latch.get() creates a Poller on each
+    # invocation. In a microbenchmark, poll() vs. epoll_ctl() is 30% faster in
+    # this scenario. If select() must return in future, it is important
+    # Latch.poller_class is set from parent.py to point to the industrial
+    # strength poller for the OS, otherwise Latch will fail randomly.
+
+    #: Increments on every poll(). Used to version _rfds and _wfds.
+    _generation = 1
+
+    def __init__(self):
+        # fd -> (data, generation-at-registration) for each readiness type.
+        self._rfds = {}
+        self._wfds = {}
+
+    def __repr__(self):
+        return '%s' % (type(self).__name__,)
+
+    def _update(self, fd):
+        """
+        Required by PollPoller subclass.
+        """
+        pass
+
+    @property
+    def readers(self):
+        """
+        Return a list of `(fd, data)` tuples for every FD registered for
+        receive readiness.
+        """
+        return list((fd, data) for fd, (data, gen) in self._rfds.items())
+
+    @property
+    def writers(self):
+        """
+        Return a list of `(fd, data)` tuples for every FD registered for
+        transmit readiness.
+        """
+        return list((fd, data) for fd, (data, gen) in self._wfds.items())
+
+    def close(self):
+        """
+        Close any underlying OS resource used by the poller.
+        """
+        pass
+
+    def start_receive(self, fd, data=None):
+        """
+        Cause :meth:`poll` to yield `data` when `fd` is readable.
+        """
+        # Falsy `data` defaults to the FD itself; the stored generation lets
+        # _poll() skip registrations made while iterating its results.
+        self._rfds[fd] = (data or fd, self._generation)
+        self._update(fd)
+
+    def stop_receive(self, fd):
+        """
+        Stop yielding readability events for `fd`.
+
+        Redundant calls to :meth:`stop_receive` are silently ignored, this may
+        change in future.
+        """
+        self._rfds.pop(fd, None)
+        self._update(fd)
+
+    def start_transmit(self, fd, data=None):
+        """
+        Cause :meth:`poll` to yield `data` when `fd` is writeable.
+        """
+        self._wfds[fd] = (data or fd, self._generation)
+        self._update(fd)
+
+    def stop_transmit(self, fd):
+        """
+        Stop yielding writeability events for `fd`.
+
+        Redundant calls to :meth:`stop_transmit` are silently ignored, this may
+        change in future.
+        """
+        self._wfds.pop(fd, None)
+        self._update(fd)
+
+    def _poll(self, timeout):
+        (rfds, wfds, _), _ = io_op(select.select,
+            self._rfds,
+            self._wfds,
+            (), timeout
+        )
+
+        for fd in rfds:
+            _vv and IOLOG.debug('%r: POLLIN for %r', self, fd)
+            # FDs removed since select() returned yield (None, None) and are
+            # skipped; ones registered during iteration have gen equal to
+            # _generation and are deferred to the next poll().
+            data, gen = self._rfds.get(fd, (None, None))
+            if gen and gen < self._generation:
+                yield data
+
+        for fd in wfds:
+            _vv and IOLOG.debug('%r: POLLOUT for %r', self, fd)
+            data, gen = self._wfds.get(fd, (None, None))
+            if gen and gen < self._generation:
+                yield data
+
+    def poll(self, timeout=None):
+        """
+        Block the calling thread until one or more FDs are ready for IO.
+
+        :param float timeout:
+            If not :data:`None`, seconds to wait without an event before
+            returning an empty iterable.
+        :returns:
+            Iterable of `data` elements associated with ready FDs.
+        """
+        _vv and IOLOG.debug('%r.poll(%r)', self, timeout)
+        self._generation += 1
+        return self._poll(timeout)
+
+
+class Latch(object):
+    """
+    A latch is a :class:`Queue.Queue`-like object that supports mutation and
+    waiting from multiple threads, however unlike :class:`Queue.Queue`,
+    waiting threads always remain interruptible, so CTRL+C always succeeds, and
+    waits where a timeout is set experience no wake up latency. These
+    properties are not possible in combination using the built-in threading
+    primitives available in Python 2.x.
+
+    Latches implement queues using the UNIX self-pipe trick, and a per-thread
+    :func:`socket.socketpair` that is lazily created the first time any
+    latch attempts to sleep on a thread, and dynamically associated with the
+    waiting Latch only for duration of the wait.
+
+    See :ref:`waking-sleeping-threads` for further discussion.
+    """
+    #: The :class:`Poller` implementation to use for waiting. Since the poller
+    #: will be very short-lived, we prefer :class:`mitogen.parent.PollPoller`
+    #: if it is available, or :class:`mitogen.core.Poller` otherwise, since
+    #: these implementations require no system calls to create, configure or
+    #: destroy.
+    poller_class = Poller
+
+    #: If not :data:`None`, a function invoked as `notify(latch)` after a
+    #: successful call to :meth:`put`. The function is invoked on the
+    #: :meth:`put` caller's thread, which may be the :class:`Broker` thread,
+    #: therefore it must not block. Used by :class:`mitogen.select.Select` to
+    #: efficiently implement waiting on multiple event sources.
+    notify = None
+
+    # The _cls_ prefixes here are to make it crystal clear in the code which
+    # state mutation isn't covered by :attr:`_lock`.
+
+    #: List of reusable :func:`socket.socketpair` tuples. The list is mutated
+    #: from multiple threads, the only safe operations are `append()` and
+    #: `pop()`.
+    _cls_idle_socketpairs = []
+
+    #: List of every socket object that must be closed by :meth:`_on_fork`.
+    #: Inherited descriptors cannot be reused, as the duplicated handles
+    #: reference the same underlying kernel object in use by the parent.
+    _cls_all_sockets = []
+
+    def __init__(self):
+        self.closed = False
+        self._lock = threading.Lock()
+        #: List of unconsumed enqueued items.
+        self._queue = []
+        #: List of `(wsock, cookie)` awaiting an element, where `wsock` is the
+        #: socketpair's write side, and `cookie` is the string to write.
+        self._sleeping = []
+        #: Number of elements of :attr:`_sleeping` that have already been
+        #: woken, and have a corresponding element index from :attr:`_queue`
+        #: assigned to them.
+        self._waking = 0
+
+    @classmethod
+    def _on_fork(cls):
+        """
+        Clean up any files belonging to the parent process after a fork.
+        """
+        cls._cls_idle_socketpairs = []
+        while cls._cls_all_sockets:
+            cls._cls_all_sockets.pop().close()
+
+    def close(self):
+        """
+        Mark the latch as closed, and cause every sleeping thread to be woken,
+        with :class:`mitogen.core.LatchError` raised in each thread.
+        """
+        self._lock.acquire()
+        try:
+            self.closed = True
+            while self._waking < len(self._sleeping):
+                wsock, cookie = self._sleeping[self._waking]
+                self._wake(wsock, cookie)
+                self._waking += 1
+        finally:
+            self._lock.release()
+
+    def size(self):
+        """
+        Return the number of items currently buffered.
+
+        As with :class:`Queue.Queue`, `0` may be returned even though a
+        subsequent call to :meth:`get` will succeed, since a message may be
+        posted at any moment between :meth:`size` and :meth:`get`.
+
+        As with :class:`Queue.Queue`, `>0` may be returned even though a
+        subsequent call to :meth:`get` will block, since another waiting thread
+        may be woken at any moment between :meth:`size` and :meth:`get`.
+
+        :raises LatchError:
+            The latch has already been marked closed.
+        """
+        self._lock.acquire()
+        try:
+            if self.closed:
+                raise LatchError()
+            return len(self._queue)
+        finally:
+            self._lock.release()
+
+    def empty(self):
+        """
+        Return `size() == 0`.
+
+        .. deprecated:: 0.2.8
+           Use :meth:`size` instead.
+
+        :raises LatchError:
+            The latch has already been marked closed.
+        """
+        return self.size() == 0
+
+    def _get_socketpair(self):
+        """
+        Return an unused socketpair, creating one if none exist.
+        """
+        try:
+            return self._cls_idle_socketpairs.pop()  # pop() must be atomic
+        except IndexError:
+            rsock, wsock = socket.socketpair()
+            rsock.setblocking(False)
+            set_cloexec(rsock.fileno())
+            set_cloexec(wsock.fileno())
+            self._cls_all_sockets.extend((rsock, wsock))
+            return rsock, wsock
+
+    COOKIE_MAGIC, = struct.unpack('L', b('LTCH') * (struct.calcsize('L')//4))
+    COOKIE_FMT = '>Qqqq'  # #545: id() and get_ident() may exceed long on armhfp.
+    COOKIE_SIZE = struct.calcsize(COOKIE_FMT)
+
+    def _make_cookie(self):
+        """
+        Return a string encoding the ID of the process, instance and thread.
+        This disambiguates legitimate wake-ups, accidental writes to the FD,
+        and buggy internal FD sharing.
+        """
+        return struct.pack(self.COOKIE_FMT, self.COOKIE_MAGIC,
+                           os.getpid(), id(self), thread.get_ident())
+
+    def get(self, timeout=None, block=True):
+        """
+        Return the next enqueued object, or sleep waiting for one.
+
+        :param float timeout:
+            If not :data:`None`, specifies a timeout in seconds.
+
+        :param bool block:
+            If :data:`False`, immediately raise
+            :class:`mitogen.core.TimeoutError` if the latch is empty.
+
+        :raises mitogen.core.LatchError:
+            :meth:`close` has been called, and the object is no longer valid.
+
+        :raises mitogen.core.TimeoutError:
+            Timeout was reached.
+
+        :returns:
+            The de-queued object.
+        """
+        _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)',
+                            self, timeout, block)
+        self._lock.acquire()
+        try:
+            if self.closed:
+                raise LatchError()
+            i = len(self._sleeping)
+            if len(self._queue) > i:
+                _vv and IOLOG.debug('%r.get() -> %r', self, self._queue[i])
+                return self._queue.pop(i)
+            if not block:
+                raise TimeoutError()
+            rsock, wsock = self._get_socketpair()
+            cookie = self._make_cookie()
+            self._sleeping.append((wsock, cookie))
+        finally:
+            self._lock.release()
+
+        poller = self.poller_class()
+        poller.start_receive(rsock.fileno())
+        try:
+            return self._get_sleep(poller, timeout, block, rsock, wsock, cookie)
+        finally:
+            poller.close()
+
+    def _get_sleep(self, poller, timeout, block, rsock, wsock, cookie):
+        """
+        When a result is not immediately available, sleep waiting for
+        :meth:`put` to write a byte to our socket pair.
+        """
+        _vv and IOLOG.debug(
+            '%r._get_sleep(timeout=%r, block=%r, fd=%d/%d)',
+            self, timeout, block, rsock.fileno(), wsock.fileno()
+        )
+
+        e = None
+        try:
+            list(poller.poll(timeout))
+        except Exception:
+            e = sys.exc_info()[1]
+
+        self._lock.acquire()
+        try:
+            i = self._sleeping.index((wsock, cookie))
+            del self._sleeping[i]
+
+            try:
+                got_cookie = rsock.recv(self.COOKIE_SIZE)
+            except socket.error:
+                e2 = sys.exc_info()[1]
+                if e2.args[0] == errno.EAGAIN:
+                    e = TimeoutError()
+                else:
+                    e = e2
+
+            self._cls_idle_socketpairs.append((rsock, wsock))
+            if e:
+                raise e
+
+            assert cookie == got_cookie, (
+                "Cookie incorrect; got %r, expected %r" \
+                % (binascii.hexlify(got_cookie),
+                   binascii.hexlify(cookie))
+            )
+            assert i < self._waking, (
+                "Cookie correct, but no queue element assigned."
+            )
+            self._waking -= 1
+            if self.closed:
+                raise LatchError()
+            _vv and IOLOG.debug('%r.get() wake -> %r', self, self._queue[i])
+            return self._queue.pop(i)
+        finally:
+            self._lock.release()
+
+    def put(self, obj=None):
+        """
+        Enqueue an object, waking the first thread waiting for a result, if one
+        exists.
+
+        :param obj:
+            Object to enqueue. Defaults to :data:`None` as a convenience when
+            using :class:`Latch` only for synchronization.
+        :raises mitogen.core.LatchError:
+            :meth:`close` has been called, and the object is no longer valid.
+        """
+        _vv and IOLOG.debug('%r.put(%r)', self, obj)
+        self._lock.acquire()
+        try:
+            if self.closed:
+                raise LatchError()
+            self._queue.append(obj)
+
+            wsock = None
+            if self._waking < len(self._sleeping):
+                wsock, cookie = self._sleeping[self._waking]
+                self._waking += 1
+                _vv and IOLOG.debug('%r.put() -> waking wfd=%r',
+                                    self, wsock.fileno())
+            elif self.notify:
+                self.notify(self)
+        finally:
+            self._lock.release()
+
+        if wsock:
+            self._wake(wsock, cookie)
+
+    def _wake(self, wsock, cookie):
+        written, disconnected = io_op(os.write, wsock.fileno(), cookie)
+        assert written == len(cookie) and not disconnected
+
+    def __repr__(self):
+        return 'Latch(%#x, size=%d, t=%r)' % (
+            id(self),
+            len(self._queue),
+            threading.currentThread().getName(),
+        )
+
+
+class Waker(Protocol):
+    """
+    :class:`Protocol` implementing the `UNIX self-pipe trick`_. Used to wake
+    :class:`Broker` when another thread needs to modify its state, by enqueuing
+    a function call to run on the :class:`Broker` thread.
+
+    .. _UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html
+    """
+    read_size = 1
+    broker_ident = None
+
+    @classmethod
+    def build_stream(cls, broker):
+        stream = super(Waker, cls).build_stream(broker)
+        stream.accept(*pipe())
+        return stream
+
+    def __init__(self, broker):
+        self._broker = broker
+        self._deferred = collections.deque()
+
+    def __repr__(self):
+        return 'Waker(fd=%r/%r)' % (
+            self.stream.receive_side and self.stream.receive_side.fd,
+            self.stream.transmit_side and self.stream.transmit_side.fd,
+        )
+
+    @property
+    def keep_alive(self):
+        """
+        Prevent immediate Broker shutdown while deferred functions remain.
+        """
+        return len(self._deferred)
+
+    def on_receive(self, broker, buf):
+        """
+        Drain the pipe and fire callbacks. Since :attr:`_deferred` is
+        synchronized, :meth:`defer` and :meth:`on_receive` can conspire to
+        ensure only one byte needs to be pending regardless of queue length.
+        """
+        _vv and IOLOG.debug('%r.on_receive()', self)
+        while True:
+            try:
+                func, args, kwargs = self._deferred.popleft()
+            except IndexError:
+                return
+
+            try:
+                func(*args, **kwargs)
+            except Exception:
+                LOG.exception('defer() crashed: %r(*%r, **%r)',
+                              func, args, kwargs)
+                broker.shutdown()
+
+    def _wake(self):
+        """
+        Wake the multiplexer by writing a byte. If Broker is midway through
+        teardown, the FD may already be closed, so ignore EBADF.
+        """
+        try:
+            self.stream.transmit_side.write(b(' '))
+        except OSError:
+            e = sys.exc_info()[1]
+            if e.args[0] in (errno.EBADF, errno.EWOULDBLOCK):
+                raise
+
+    broker_shutdown_msg = (
+        "An attempt was made to enqueue a message with a Broker that has "
+        "already exitted. It is likely your program called Broker.shutdown() "
+        "too early."
+    )
+
+    def defer(self, func, *args, **kwargs):
+        """
+        Arrange for `func()` to execute on the broker thread. This function
+        returns immediately without waiting for the result of `func()`. Use
+        :meth:`defer_sync` to block until a result is available.
+
+        :raises mitogen.core.Error:
+            :meth:`defer` was called after :class:`Broker` has begun shutdown.
+        """
+        if thread.get_ident() == self.broker_ident:
+            _vv and IOLOG.debug('%r.defer() [immediate]', self)
+            return func(*args, **kwargs)
+        if self._broker._exitted:
+            raise Error(self.broker_shutdown_msg)
+
+        _vv and IOLOG.debug('%r.defer() [fd=%r]', self,
+                            self.stream.transmit_side.fd)
+        self._deferred.append((func, args, kwargs))
+        self._wake()
+
+
+class IoLoggerProtocol(DelimitedProtocol):
+    """
+    Attached to one end of a socket pair whose other end overwrites one of the
+    standard ``stdout`` or ``stderr`` file descriptors in a child context.
+    Received data is split up into lines, decoded as UTF-8 and logged to the
+    :mod:`logging` package as either the ``stdout`` or ``stderr`` logger.
+
+    Logging in child contexts is in turn forwarded to the master process using
+    :class:`LogHandler`.
+    """
+    @classmethod
+    def build_stream(cls, name, dest_fd):
+        """
+        Even though the file descriptor `dest_fd` will hold the opposite end of
+        the socket open, we must keep a separate dup() of it (i.e. wsock) in
+        case some code decides to overwrite `dest_fd` later, which would
+        prevent :meth:`on_shutdown` from calling :meth:`shutdown()
+        <socket.socket.shutdown>` on it.
+        """
+        rsock, wsock = socket.socketpair()
+        os.dup2(wsock.fileno(), dest_fd)
+        stream = super(IoLoggerProtocol, cls).build_stream(name)
+        stream.name = name
+        stream.accept(rsock, wsock)
+        return stream
+
+    def __init__(self, name):
+        self._log = logging.getLogger(name)
+        # #453: prevent accidental log initialization in a child creating a
+        # feedback loop.
+        self._log.propagate = False
+        self._log.handlers = logging.getLogger().handlers[:]
+
+    def on_shutdown(self, broker):
+        """
+        Shut down the write end of the socket, preventing any further writes to
+        it by this process, or subprocess that inherited it. This allows any
+        remaining kernel-buffered data to be drained during graceful shutdown
+        without the buffer continuously refilling due to some out of control
+        child process.
+        """
+        _v and LOG.debug('%r: shutting down', self)
+        if not IS_WSL:
+            # #333: WSL generates invalid readiness indication on shutdown().
+            # This modifies the *kernel object* inherited by children, causing
+            # EPIPE on subsequent writes to any dupped FD in any process. The
+            # read side can then drain completely of prior buffered data.
+            self.stream.transmit_side.fp.shutdown(socket.SHUT_WR)
+        self.stream.transmit_side.close()
+
+    def on_line_received(self, line):
+        """
+        Decode the received line as UTF-8 and pass it to the logging framework.
+        """
+        self._log.info('%s', line.decode('utf-8', 'replace'))
+
+
+class Router(object):
+    """
+    Route messages between contexts, and invoke local handlers for messages
+    addressed to this context. :meth:`Router.route() <route>` straddles the
+    :class:`Broker` thread and user threads, it is safe to call anywhere.
+
+    **Note:** This is the somewhat limited core version of the Router class
+    used by child contexts. The master subclass is documented below this one.
+    """
+    #: The :class:`mitogen.core.Context` subclass to use when constructing new
+    #: :class:`Context` objects in :meth:`myself` and :meth:`context_by_id`.
+    #: Permits :class:`Router` subclasses to extend the :class:`Context`
+    #: interface, as done in :class:`mitogen.parent.Router`.
+    context_class = Context
+
+    max_message_size = 128 * 1048576
+
+    #: When :data:`True`, permit children to only communicate with the current
+    #: context or a parent of the current context. Routing between siblings or
+    #: children of parents is prohibited, ensuring no communication is possible
+    #: between intentionally partitioned networks, such as when a program
+    #: simultaneously manipulates hosts spread across a corporate and a
+    #: production network, or production networks that are otherwise
+    #: air-gapped.
+    #:
+    #: Sending a prohibited message causes an error to be logged and a dead
+    #: message to be sent in reply to the errant message, if that message has
+    #: ``reply_to`` set.
+    #:
+    #: The value of :data:`unidirectional` becomes the default for the
+    #: :meth:`local() <mitogen.master.Router.local>` `unidirectional`
+    #: parameter.
+    unidirectional = False
+
+    duplicate_handle_msg = 'cannot register a handle that already exists'
+    refused_msg = 'refused by policy'
+    invalid_handle_msg = 'invalid handle'
+    too_large_msg = 'message too large (max %d bytes)'
+    respondent_disconnect_msg = 'the respondent Context has disconnected'
+    broker_exit_msg = 'Broker has exitted'
+    no_route_msg = 'no route to %r, my ID is %r'
+    unidirectional_msg = (
+        'routing mode prevents forward of message from context %d to '
+        'context %d via context %d'
+    )
+
+    def __init__(self, broker):
+        self.broker = broker
+        listen(broker, 'exit', self._on_broker_exit)
+        self._setup_logging()
+
+        self._write_lock = threading.Lock()
+        #: context ID -> Stream; must hold _write_lock to edit or iterate
+        self._stream_by_id = {}
+        #: List of contexts to notify of shutdown; must hold _write_lock
+        self._context_by_id = {}
+        self._last_handle = itertools.count(1000)
+        #: handle -> (persistent?, func(msg))
+        self._handle_map = {}
+        #: Context -> set { handle, .. }
+        self._handles_by_respondent = {}
+        self.add_handler(self._on_del_route, DEL_ROUTE)
+
+    def __repr__(self):
+        return 'Router(%r)' % (self.broker,)
+
+    def _setup_logging(self):
+        """
+        This is done in the :class:`Router` constructor for historical reasons.
+        It must be called before ExternalContext logs its first messages, but
+        after logging has been setup. It must also be called when any router is
+        constructed for a consumer app.
+        """
+        # Here seems as good a place as any.
+        global _v, _vv
+        _v = logging.getLogger().level <= logging.DEBUG
+        _vv = IOLOG.level <= logging.DEBUG
+
+    def _on_del_route(self, msg):
+        """
+        Stub :data:`DEL_ROUTE` handler; fires 'disconnect' events on the
+        corresponding :attr:`_context_by_id` member. This is replaced by
+        :class:`mitogen.parent.RouteMonitor` in an upgraded context.
+        """
+        if msg.is_dead:
+            return
+
+        target_id_s, _, name = bytes_partition(msg.data, b(':'))
+        target_id = int(target_id_s, 10)
+        LOG.error('%r: deleting route to %s (%d)',
+                  self, to_text(name), target_id)
+        context = self._context_by_id.get(target_id)
+        if context:
+            fire(context, 'disconnect')
+        else:
+            LOG.debug('DEL_ROUTE for unknown ID %r: %r', target_id, msg)
+
+    def _on_stream_disconnect(self, stream):
+        notify = []
+        self._write_lock.acquire()
+        try:
+            for context in list(self._context_by_id.values()):
+                stream_ = self._stream_by_id.get(context.context_id)
+                if stream_ is stream:
+                    del self._stream_by_id[context.context_id]
+                    notify.append(context)
+        finally:
+            self._write_lock.release()
+
+        # Happens outside lock as e.g. RouteMonitor wants the same lock.
+        for context in notify:
+            context.on_disconnect()
+
+    def _on_broker_exit(self):
+        """
+        Called prior to broker exit, informs callbacks registered with
+        :meth:`add_handler` the connection is dead.
+        """
+        _v and LOG.debug('%r: broker has exitted', self)
+        while self._handle_map:
+            _, (_, func, _, _) = self._handle_map.popitem()
+            func(Message.dead(self.broker_exit_msg))
+
+    def myself(self):
+        """
+        Return a :class:`Context` referring to the current process. Since
+        :class:`Context` is serializable, this is convenient to use in remote
+        function call parameter lists.
+        """
+        return self.context_class(
+            router=self,
+            context_id=mitogen.context_id,
+            name='self',
+        )
+
+    def context_by_id(self, context_id, via_id=None, create=True, name=None):
+        """
+        Return or construct a :class:`Context` given its ID. An internal
+        mapping of ID to the canonical :class:`Context` for that ID is
+        maintained, so that :ref:`signals` can be raised.
+
+        This may be called from any thread, lookup and construction are atomic.
+
+        :param int context_id:
+            The context ID to look up.
+        :param int via_id:
+            If the :class:`Context` does not already exist, set its
+            :attr:`Context.via` to the :class:`Context` matching this ID.
+        :param bool create:
+            If the :class:`Context` does not already exist, create it.
+        :param str name:
+            If the :class:`Context` does not already exist, set its name.
+
+        :returns:
+            :class:`Context`, or return :data:`None` if `create` is
+            :data:`False` and no :class:`Context` previously existed.
+        """
+        context = self._context_by_id.get(context_id)
+        if context:
+            return context
+
+        if create and via_id is not None:
+            via = self.context_by_id(via_id)
+        else:
+            via = None
+
+        self._write_lock.acquire()
+        try:
+            context = self._context_by_id.get(context_id)
+            if create and not context:
+                context = self.context_class(self, context_id, name=name)
+                context.via = via
+                self._context_by_id[context_id] = context
+        finally:
+            self._write_lock.release()
+
+        return context
+
+    def register(self, context, stream):
+        """
+        Register a newly constructed context and its associated stream, and add
+        the stream's receive side to the I/O multiplexer. This method remains
+        public while the design has not yet settled.
+        """
+        _v and LOG.debug('%s: registering %r to stream %r',
+                         self, context, stream)
+        self._write_lock.acquire()
+        try:
+            self._stream_by_id[context.context_id] = stream
+            self._context_by_id[context.context_id] = context
+        finally:
+            self._write_lock.release()
+
+        self.broker.start_receive(stream)
+        listen(stream, 'disconnect', lambda: self._on_stream_disconnect(stream))
+
+    def stream_by_id(self, dst_id):
+        """
+        Return the :class:`Stream` that should be used to communicate with
+        `dst_id`. If a specific route for `dst_id` is not known, a reference to
+        the parent context's stream is returned. If the parent is disconnected,
+        or when running in the master context, return :data:`None` instead.
+
+        This can be used from any thread, but its output is only meaningful
+        from the context of the :class:`Broker` thread, as disconnection or
+        replacement could happen in parallel on the broker thread at any
+        moment.
+        """
+        return (
+            self._stream_by_id.get(dst_id) or
+            self._stream_by_id.get(mitogen.parent_id)
+        )
+
+    def del_handler(self, handle):
+        """
+        Remove the handler registered for `handle`.
+
+        :raises KeyError:
+            The handle wasn't registered.
+        """
+        _, _, _, respondent = self._handle_map.pop(handle)
+        if respondent:
+            self._handles_by_respondent[respondent].discard(handle)
+
+    def add_handler(self, fn, handle=None, persist=True,
+                    policy=None, respondent=None,
+                    overwrite=False):
+        """
+        Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to
+        `handle` from this context. Unregister after one invocation if
+        `persist` is :data:`False`. If `handle` is :data:`None`, a new handle
+        is allocated and returned.
+
+        :param int handle:
+            If not :data:`None`, an explicit handle to register, usually one of
+            the ``mitogen.core.*`` constants. If unspecified, a new unused
+            handle will be allocated.
+
+        :param bool persist:
+            If :data:`False`, the handler will be unregistered after a single
+            message has been received.
+
+        :param mitogen.core.Context respondent:
+            Context that messages to this handle are expected to be sent from.
+            If specified, arranges for a dead message to be delivered to `fn`
+            when disconnection of the context is detected.
+
+            In future `respondent` will likely also be used to prevent other
+            contexts from sending messages to the handle.
+
+        :param function policy:
+            Function invoked as `policy(msg, stream)` where `msg` is a
+            :class:`mitogen.core.Message` about to be delivered, and `stream`
+            is the :class:`mitogen.core.Stream` on which it was received. The
+            function must return :data:`True`, otherwise an error is logged and
+            delivery is refused.
+
+            Two built-in policy functions exist:
+
+            * :func:`has_parent_authority`: requires the message arrived from a
+              parent context, or a context acting with a parent context's
+              authority (``auth_id``).
+
+            * :func:`mitogen.parent.is_immediate_child`: requires the
+              message arrived from an immediately connected child, for use in
+              messaging patterns where either something becomes buggy or
+              insecure by permitting indirect upstream communication.
+
+            In case of refusal, and the message's ``reply_to`` field is
+            nonzero, a :class:`mitogen.core.CallError` is delivered to the
+            sender indicating refusal occurred.
+
+        :param bool overwrite:
+            If :data:`True`, allow existing handles to be silently overwritten.
+
+        :return:
+            `handle`, or if `handle` was :data:`None`, the newly allocated
+            handle.
+        :raises Error:
+            Attempt to register a handle that was already registered.
+        """
+        handle = handle or next(self._last_handle)
+        _vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist)
+        if handle in self._handle_map and not overwrite:
+            raise Error(self.duplicate_handle_msg)
+
+        self._handle_map[handle] = persist, fn, policy, respondent
+        if respondent:
+            if respondent not in self._handles_by_respondent:
+                self._handles_by_respondent[respondent] = set()
+                listen(respondent, 'disconnect',
+                       lambda: self._on_respondent_disconnect(respondent))
+            self._handles_by_respondent[respondent].add(handle)
+
+        return handle
+
+    def _on_respondent_disconnect(self, context):
+        for handle in self._handles_by_respondent.pop(context, ()):
+            _, fn, _, _  = self._handle_map[handle]
+            fn(Message.dead(self.respondent_disconnect_msg))
+            del self._handle_map[handle]
+
+    def _maybe_send_dead(self, unreachable, msg, reason, *args):
+        """
+        Send a dead message to either the original sender or the intended
+        recipient of `msg`, if the original sender was expecting a reply
+        (because its `reply_to` was set), otherwise assume the message is a
+        reply of some sort, and send the dead message to the original
+        destination.
+
+        :param bool unreachable:
+            If :data:`True`, the recipient is known to be dead or routing
+            failed due to a security precaution, so don't attempt to fallback
+            to sending the dead message to the recipient if the original sender
+            did not include a reply address.
+        :param mitogen.core.Message msg:
+            Message that triggered the dead message.
+        :param str reason:
+            Human-readable error reason.
+        :param tuple args:
+            Elements to interpolate with `reason`.
+        """
+        # Interpolate once, so the log line and the dead message agree.
+        if args:
+            reason %= args
+        LOG.debug('%r: %r is dead: %r', self, msg, reason)
+        # Reply only when the sender expects one and `msg` is not itself a
+        # dead message, which avoids generating dead messages in response to
+        # dead messages.
+        if msg.reply_to and not msg.is_dead:
+            msg.reply(Message.dead(reason=reason), router=self)
+        elif not unreachable:
+            self._async_route(
+                Message.dead(
+                    dst_id=msg.dst_id,
+                    handle=msg.handle,
+                    reason=reason,
+                )
+            )
+
+    def _invoke(self, msg, stream):
+        """
+        Deliver `msg` to the handler registered for its handle, enforcing the
+        respondent and policy checks recorded by add_handler(). `stream` is
+        the stream the message arrived on, or :data:`None` for locally
+        originated messages; it is passed through to the policy function.
+        """
+        # IOLOG.debug('%r._invoke(%r)', self, msg)
+        try:
+            persist, fn, policy, respondent = self._handle_map[msg.handle]
+        except KeyError:
+            # No handler registered for this handle: notify the sender.
+            self._maybe_send_dead(True, msg, reason=self.invalid_handle_msg)
+            return
+
+        # Handlers bound to a respondent accept replies only from that
+        # context; dead messages are exempt so disconnect notices get in.
+        if respondent and not (msg.is_dead or
+                               msg.src_id == respondent.context_id):
+            self._maybe_send_dead(True, msg, 'reply from unexpected context')
+            return
+
+        if policy and not policy(msg, stream):
+            self._maybe_send_dead(True, msg, self.refused_msg)
+            return
+
+        # Single-shot handler: deregister before invoking it.
+        if not persist:
+            self.del_handler(msg.handle)
+
+        try:
+            fn(msg)
+        except Exception:
+            # A crashing handler must not kill the broker thread.
+            LOG.exception('%r._invoke(%r): %r crashed', self, msg, fn)
+
+    def _async_route(self, msg, in_stream=None):
+        """
+        Arrange for `msg` to be forwarded towards its destination. If its
+        destination is the local context, then arrange for it to be dispatched
+        using the local handlers.
+
+        This is a lower overhead version of :meth:`route` that may only be
+        called from the :class:`Broker` thread.
+
+        :param Stream in_stream:
+            If not :data:`None`, the stream the message arrived on. Used for
+            performing source route verification, to ensure sensitive messages
+            such as ``CALL_FUNCTION`` arrive only from trusted contexts.
+        """
+        _vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream)
+
+        # Reject messages exceeding the configured maximum size.
+        if len(msg.data) > self.max_message_size:
+            self._maybe_send_dead(False, msg, self.too_large_msg % (
+                self.max_message_size,
+            ))
+            return
+
+        parent_stream = self._stream_by_id.get(mitogen.parent_id)
+        src_stream = self._stream_by_id.get(msg.src_id, parent_stream)
+
+        # When the ingress stream is known, verify the message was received on
+        # the same as the stream we would expect to receive messages from the
+        # src_id and auth_id. This is like Reverse Path Filtering in IP, and
+        # ensures messages from a privileged context cannot be spoofed by a
+        # child.
+        if in_stream:
+            auth_stream = self._stream_by_id.get(msg.auth_id, parent_stream)
+            if in_stream != auth_stream:
+                LOG.error('%r: bad auth_id: got %r via %r, not %r: %r',
+                          self, msg.auth_id, in_stream, auth_stream, msg)
+                return
+
+            if msg.src_id != msg.auth_id and in_stream != src_stream:
+                LOG.error('%r: bad src_id: got %r via %r, not %r: %r',
+                          self, msg.src_id, in_stream, src_stream, msg)
+                return
+
+            # If the stream's MitogenProtocol has auth_id set, copy it to the
+            # message. This allows subtrees to become privileged by stamping a
+            # parent's context ID. It is used by mitogen.unix to mark client
+            # streams (like Ansible WorkerProcess) as having the same rights as
+            # the parent.
+            if in_stream.protocol.auth_id is not None:
+                msg.auth_id = in_stream.protocol.auth_id
+            if in_stream.protocol.on_message is not None:
+                in_stream.protocol.on_message(in_stream, msg)
+
+            # Record the IDs the source ever communicated with.
+            in_stream.protocol.egress_ids.add(msg.dst_id)
+
+        # Destined for this context: dispatch via the registered handlers.
+        if msg.dst_id == mitogen.context_id:
+            return self._invoke(msg, in_stream)
+
+        out_stream = self._stream_by_id.get(msg.dst_id)
+        if (not out_stream) and (parent_stream != src_stream or not in_stream):
+            # No downstream route exists. The message could be from a child or
+            # ourselves for a parent, in which case we must forward it
+            # upstream, or it could be from a parent for a dead child, in which
+            # case its src_id/auth_id would fail verification if returned to
+            # the parent, so in that case reply with a dead message instead.
+            out_stream = parent_stream
+
+        if out_stream is None:
+            self._maybe_send_dead(True, msg, self.no_route_msg,
+                                  msg.dst_id, mitogen.context_id)
+            return
+
+        # In unidirectional mode, messages may only flow when one endpoint of
+        # the hop is a privileged stream.
+        if in_stream and self.unidirectional and not \
+                (in_stream.protocol.is_privileged or
+                 out_stream.protocol.is_privileged):
+            self._maybe_send_dead(True, msg, self.unidirectional_msg,
+                                  in_stream.protocol.remote_id,
+                                  out_stream.protocol.remote_id,
+                                  mitogen.context_id)
+            return
+
+        out_stream.protocol._send(msg)
+
+    def route(self, msg):
+        """
+        Arrange for the :class:`Message` `msg` to be delivered to its
+        destination using any relevant downstream context, or if none is found,
+        by forwarding the message upstream towards the master context. If `msg`
+        is destined for the local context, it is dispatched using the handles
+        registered with :meth:`add_handler`.
+
+        This may be called from any thread.
+        """
+        # _async_route() may only run on the broker thread; defer() hops there.
+        self.broker.defer(self._async_route, msg)
+
+
+class NullTimerList(object):
+    """
+    Placeholder timer list installed as :attr:`Broker.timers` until
+    :func:`mitogen.parent._upgrade_broker` replaces it with a real
+    :class:`mitogen.parent.TimerList`. It always reports that no timer is
+    pending, so the poller may block indefinitely.
+    """
+    def get_timeout(self):
+        # No timers are ever registered here.
+        return None
+
+
+class Broker(object):
+    """
+    Responsible for handling I/O multiplexing in a private thread.
+
+    **Note:** This somewhat limited core version is used by children. The
+    master subclass is documented below.
+    """
+    #: Poller implementation class; consulted at construction time.
+    poller_class = Poller
+    # Class-level defaults so attribute access stays safe even if __init__
+    # did not run to completion.
+    _waker = None
+    _thread = None
+
+    # :func:`mitogen.parent._upgrade_broker` replaces this with
+    # :class:`mitogen.parent.TimerList` during upgrade.
+    timers = NullTimerList()
+
+    #: Seconds grace to allow :class:`streams <Stream>` to shutdown gracefully
+    #: before force-disconnecting them during :meth:`shutdown`.
+    shutdown_timeout = 3.0
+
+    def __init__(self, poller_class=None, activate_compat=True):
+        """
+        Construct the broker and immediately start its private thread.
+
+        NOTE(review): the `poller_class` argument is accepted but never
+        consulted; the class attribute :attr:`poller_class` is used instead.
+        This matches upstream mitogen 0.2.9.
+        """
+        # _alive: cleared when shutdown() is requested.
+        self._alive = True
+        # _exitted: set once the broker thread has left its main loop.
+        self._exitted = False
+        self._waker = Waker.build_stream(self)
+        #: Arrange for `func(\*args, \**kwargs)` to be executed on the broker
+        #: thread, or immediately if the current thread is the broker thread.
+        #: Safe to call from any thread.
+        self.defer = self._waker.protocol.defer
+        self.poller = self.poller_class()
+        self.poller.start_receive(
+            self._waker.receive_side.fd,
+            (self._waker.receive_side, self._waker.on_receive)
+        )
+        self._thread = threading.Thread(
+            target=self._broker_main,
+            name='mitogen.broker'
+        )
+        self._thread.start()
+        if activate_compat:
+            self._py24_25_compat()
+
+    def _py24_25_compat(self):
+        """
+        Python 2.4/2.5 have grave difficulties with threads/fork. We
+        mandatorily quiesce all running threads during fork using a
+        monkey-patch there.
+        """
+        if sys.version_info < (2, 6):
+            # import_module() is used to avoid dep scanner.
+            os_fork = import_module('mitogen.os_fork')
+            os_fork._notice_broker_or_pool(self)
+
+    def start_receive(self, stream):
+        """
+        Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
+        ready for reading. Safe to call from any thread. When the associated
+        file descriptor becomes ready for reading,
+        :meth:`BasicStream.on_receive` will be called.
+        """
+        _vv and IOLOG.debug('%r.start_receive(%r)', self, stream)
+        side = stream.receive_side
+        assert side and not side.closed
+        self.defer(self.poller.start_receive,
+                   side.fd, (side, stream.on_receive))
+
+    def stop_receive(self, stream):
+        """
+        Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as not
+        ready for reading. Safe to call from any thread.
+        """
+        _vv and IOLOG.debug('%r.stop_receive(%r)', self, stream)
+        self.defer(self.poller.stop_receive, stream.receive_side.fd)
+
+    def _start_transmit(self, stream):
+        """
+        Mark the :attr:`transmit_side <Stream.transmit_side>` on `stream` as
+        ready for writing. Must only be called from the Broker thread. When the
+        associated file descriptor becomes ready for writing,
+        :meth:`BasicStream.on_transmit` will be called.
+        """
+        _vv and IOLOG.debug('%r._start_transmit(%r)', self, stream)
+        side = stream.transmit_side
+        assert side and not side.closed
+        self.poller.start_transmit(side.fd, (side, stream.on_transmit))
+
+    def _stop_transmit(self, stream):
+        """
+        Mark the :attr:`transmit_side <Stream.receive_side>` on `stream` as not
+        ready for writing.
+        """
+        _vv and IOLOG.debug('%r._stop_transmit(%r)', self, stream)
+        self.poller.stop_transmit(stream.transmit_side.fd)
+
+    def keep_alive(self):
+        """
+        Return :data:`True` if any reader's :attr:`Side.keep_alive` attribute
+        is :data:`True`, or any :class:`Context` is still registered that is
+        not the master. Used to delay shutdown while some important work is in
+        progress (e.g. log draining).
+
+        NOTE(review): the implementation only inspects reader keep_alive
+        flags and pending timers; registered contexts are not examined,
+        despite the wording above (upstream docstring).
+        """
+        # Sum boolean keep_alive flags across registered readers.
+        it = (side.keep_alive for (_, (side, _)) in self.poller.readers)
+        return sum(it, 0) > 0 or self.timers.get_timeout() is not None
+
+    def defer_sync(self, func):
+        """
+        Arrange for `func()` to execute on :class:`Broker` thread, blocking the
+        current thread until a result or exception is available.
+
+        :returns:
+            Return value of `func()`.
+        """
+        latch = Latch()
+        def wrapper():
+            try:
+                latch.put(func())
+            except Exception:
+                # Marshal the exception instance through the latch; it is
+                # re-raised below on the calling thread.
+                latch.put(sys.exc_info()[1])
+        self.defer(wrapper)
+        res = latch.get()
+        # NOTE(review): a legitimate Exception instance *returned* by func()
+        # is indistinguishable from a marshalled failure and will be raised.
+        if isinstance(res, Exception):
+            raise res
+        return res
+
+    def _call(self, stream, func):
+        """
+        Call `func(self)`, catching any exception that might occur, logging it,
+        and force-disconnecting the related `stream`.
+        """
+        try:
+            func(self)
+        except Exception:
+            LOG.exception('%r crashed', stream)
+            stream.on_disconnect(self)
+
+    def _loop_once(self, timeout=None):
+        """
+        Execute a single :class:`Poller` wait, dispatching any IO events that
+        caused the wait to complete.
+
+        :param float timeout:
+            If not :data:`None`, maximum time in seconds to wait for events.
+        """
+        _vv and IOLOG.debug('%r._loop_once(%r, %r)',
+                            self, timeout, self.poller)
+
+        # Clamp the poll timeout to the soonest pending timer, if any.
+        timer_to = self.timers.get_timeout()
+        if timeout is None:
+            timeout = timer_to
+        elif timer_to is not None and timer_to < timeout:
+            timeout = timer_to
+
+        #IOLOG.debug('readers =\n%s', pformat(self.poller.readers))
+        #IOLOG.debug('writers =\n%s', pformat(self.poller.writers))
+        for side, func in self.poller.poll(timeout):
+            self._call(side.stream, func)
+        if timer_to is not None:
+            self.timers.expire()
+
+    def _broker_exit(self):
+        """
+        Forcefully call :meth:`Stream.on_disconnect` on any streams that failed
+        to shut down gracefully, then discard the :class:`Poller`.
+        """
+        for _, (side, _) in self.poller.readers + self.poller.writers:
+            LOG.debug('%r: force disconnecting %r', self, side)
+            side.stream.on_disconnect(self)
+
+        self.poller.close()
+
+    def _broker_shutdown(self):
+        """
+        Invoke :meth:`Stream.on_shutdown` for every active stream, then allow
+        up to :attr:`shutdown_timeout` seconds for the streams to unregister
+        themselves, logging an error if any did not unregister during the grace
+        period.
+        """
+        for _, (side, _) in self.poller.readers + self.poller.writers:
+            self._call(side.stream, side.stream.on_shutdown)
+
+        # Keep servicing IO until nothing requires the broker, or the grace
+        # period expires.
+        deadline = now() + self.shutdown_timeout
+        while self.keep_alive() and now() < deadline:
+            self._loop_once(max(0, deadline - now()))
+
+        if self.keep_alive():
+            LOG.error('%r: pending work still existed %d seconds after '
+                      'shutdown began. This may be due to a timer that is yet '
+                      'to expire, or a child connection that did not fully '
+                      'shut down.', self, self.shutdown_timeout)
+
+    def _do_broker_main(self):
+        """
+        Broker thread main function. Dispatches IO events until
+        :meth:`shutdown` is called.
+        """
+        # For Python 2.4, no way to retrieve ident except on thread.
+        self._waker.protocol.broker_ident = thread.get_ident()
+        try:
+            while self._alive:
+                self._loop_once()
+
+            fire(self, 'before_shutdown')
+            fire(self, 'shutdown')
+            self._broker_shutdown()
+        except Exception:
+            e = sys.exc_info()[1]
+            LOG.exception('broker crashed')
+            syslog.syslog(syslog.LOG_ERR, 'broker crashed: %s' % (e,))
+            syslog.closelog()  # prevent test 'fd leak'.
+
+        self._alive = False  # Ensure _alive is consistent on crash.
+        self._exitted = True
+        self._broker_exit()
+
+    def _broker_main(self):
+        try:
+            _profile_hook('mitogen.broker', self._do_broker_main)
+        finally:
+            # 'finally' to ensure _on_broker_exit() can always SIGTERM.
+            fire(self, 'exit')
+
+    def shutdown(self):
+        """
+        Request broker gracefully disconnect streams and stop. Safe to call
+        from any thread.
+        """
+        _v and LOG.debug('%r: shutting down', self)
+        def _shutdown():
+            self._alive = False
+        if self._alive and not self._exitted:
+            self.defer(_shutdown)
+
+    def join(self):
+        """
+        Wait for the broker to stop, expected to be called after
+        :meth:`shutdown`.
+        """
+        self._thread.join()
+
+    def __repr__(self):
+        return 'Broker(%04x)' % (id(self) & 0xffff,)
+
+
+class Dispatcher(object):
+    """
+    Implementation of the :data:`CALL_FUNCTION` handle for a child context.
+    Listens on the child's main thread for messages sent by
+    :class:`mitogen.parent.CallChain` and dispatches the function calls they
+    describe.
+
+    If a :class:`mitogen.parent.CallChain` sending a message is in pipelined
+    mode, any exception that occurs is recorded, and causes all subsequent
+    calls with the same `chain_id` to fail with the same exception.
+    """
+    # Class-level slot for the shared CALL_SERVICE receiver; populated in
+    # __init__ and later reused by mitogen.service.Pool (see #547).
+    _service_recv = None
+
+    def __repr__(self):
+        return 'Dispatcher'
+
+    def __init__(self, econtext):
+        self.econtext = econtext
+        #: Chain ID -> CallError if prior call failed.
+        self._error_by_chain_id = {}
+        self.recv = Receiver(
+            router=econtext.router,
+            handle=CALL_FUNCTION,
+            policy=has_parent_authority,
+        )
+        #: The :data:`CALL_SERVICE` :class:`Receiver` that will eventually be
+        #: reused by :class:`mitogen.service.Pool`, should it ever be loaded.
+        #: This is necessary for race-free reception of all service requests
+        #: delivered regardless of whether the stub or real service pool are
+        #: loaded. See #547 for related sorrows.
+        Dispatcher._service_recv = Receiver(
+            router=econtext.router,
+            handle=CALL_SERVICE,
+            policy=has_parent_authority,
+        )
+        self._service_recv.notify = self._on_call_service
+        listen(econtext.broker, 'shutdown', self._on_broker_shutdown)
+
+    def _on_broker_shutdown(self):
+        """
+        Broker shutdown listener: detach the stub service notifier (only when
+        it is still ours) and close the CALL_FUNCTION receiver so the dispatch
+        loop can exit.
+        """
+        if self._service_recv.notify == self._on_call_service:
+            self._service_recv.notify = None
+        self.recv.close()
+
+
+    @classmethod
+    @takes_econtext
+    def forget_chain(cls, chain_id, econtext):
+        """
+        Discard any error recorded against `chain_id`, permitting subsequent
+        calls on that chain to execute again.
+        """
+        econtext.dispatcher._error_by_chain_id.pop(chain_id, None)
+
+    def _parse_request(self, msg):
+        """
+        Unpickle `msg` and resolve the callable it names.
+
+        :returns:
+            Tuple of `(chain_id, fn, args, kwargs)`. The `econtext` and
+            `router` keyword arguments are injected when `fn` is decorated to
+            request them.
+        """
+        data = msg.unpickle(throw=False)
+        _v and LOG.debug('%r: dispatching %r', self, data)
+
+        chain_id, modname, klass, func, args, kwargs = data
+        obj = import_module(modname)
+        if klass:
+            obj = getattr(obj, klass)
+        fn = getattr(obj, func)
+        if getattr(fn, 'mitogen_takes_econtext', None):
+            kwargs.setdefault('econtext', self.econtext)
+        if getattr(fn, 'mitogen_takes_router', None):
+            kwargs.setdefault('router', self.econtext.router)
+
+        return chain_id, fn, args, kwargs
+
+    def _dispatch_one(self, msg):
+        """
+        Execute one CALL_FUNCTION request, returning `(chain_id, result)`
+        where `result` is the return value or a :class:`CallError`. A failure
+        on a pipelined chain is recorded so later calls on the same chain
+        short-circuit with the original error.
+        """
+        try:
+            chain_id, fn, args, kwargs = self._parse_request(msg)
+        except Exception:
+            return None, CallError(sys.exc_info()[1])
+
+        if chain_id in self._error_by_chain_id:
+            return chain_id, self._error_by_chain_id[chain_id]
+
+        try:
+            return chain_id, fn(*args, **kwargs)
+        except Exception:
+            e = CallError(sys.exc_info()[1])
+            if chain_id is not None:
+                self._error_by_chain_id[chain_id] = e
+            return chain_id, e
+
+    def _on_call_service(self, recv):
+        """
+        Notifier for the :data:`CALL_SERVICE` receiver. This is called on the
+        :class:`Broker` thread for any service messages arriving at this
+        context, for as long as no real service pool implementation is loaded.
+
+        In order to safely bootstrap the service pool implementation a sentinel
+        message is enqueued on the :data:`CALL_FUNCTION` receiver in order to
+        wake the main thread, where the importer can run without any
+        possibility of suffering deadlock due to concurrent uses of the
+        importer.
+
+        Should the main thread be blocked indefinitely, preventing the import
+        from ever running, if it is blocked waiting on a service call, then it
+        means :mod:`mitogen.service` has already been imported and
+        :func:`mitogen.service.get_or_create_pool` has already run, meaning the
+        service pool is already active and the duplicate initialization was not
+        needed anyway.
+
+        #547: This trickery is needed to avoid the alternate option of spinning
+        a temporary thread to import the service pool, which could deadlock if
+        a custom import hook executing on the main thread (under the importer
+        lock) would block waiting for some data that was in turn received by a
+        service. Main thread import lock can't be released until service is
+        running, service cannot satisfy request until import lock is released.
+        """
+        self.recv._on_receive(Message(handle=STUB_CALL_SERVICE))
+
+    def _init_service_pool(self):
+        # Import deferred until the first service request arrives; runs on
+        # the main thread (see _on_call_service).
+        import mitogen.service
+        mitogen.service.get_or_create_pool(router=self.econtext.router)
+
+    def _dispatch_calls(self):
+        """
+        Main dispatch loop: consume messages from the CALL_FUNCTION receiver
+        until it closes, replying with each result when a reply is expected.
+        """
+        for msg in self.recv:
+            # Sentinel enqueued by _on_call_service() to bootstrap the real
+            # service pool on the main thread.
+            if msg.handle == STUB_CALL_SERVICE:
+                if msg.src_id == mitogen.context_id:
+                    self._init_service_pool()
+                continue
+
+            chain_id, ret = self._dispatch_one(msg)
+            _v and LOG.debug('%r: %r -> %r', self, msg, ret)
+            if msg.reply_to:
+                msg.reply(ret)
+            elif isinstance(ret, CallError) and chain_id is None:
+                LOG.error('No-reply function call failed: %s', ret)
+
+    def run(self):
+        """
+        Entry point for the child's main thread: invoke the optional
+        `on_start` config callback, then run the dispatch loop under the
+        profiler hook.
+        """
+        if self.econtext.config.get('on_start'):
+            self.econtext.config['on_start'](self.econtext)
+
+        _profile_hook('mitogen.child_main', self._dispatch_calls)
+
+
+class ExternalContext(object):
+    """
+    External context implementation.
+
+    This class contains the main program implementation for new children. It is
+    responsible for setting up everything about the process environment, import
+    hooks, standard IO redirection, logging, configuring a :class:`Router` and
+    :class:`Broker`, and finally arranging for :class:`Dispatcher` to take over
+    the main thread after initialization is complete.
+
+    .. attribute:: broker
+
+        The :class:`mitogen.core.Broker` instance.
+
+    .. attribute:: context
+
+        The :class:`mitogen.core.Context` instance.
+
+    .. attribute:: channel
+
+        The :class:`mitogen.core.Channel` over which :data:`CALL_FUNCTION`
+        requests are received.
+
+    .. attribute:: importer
+
+        The :class:`mitogen.core.Importer` instance.
+
+    .. attribute:: stdout_log
+
+        The :class:`IoLogger` connected to :data:`sys.stdout`.
+
+    .. attribute:: stderr_log
+
+        The :class:`IoLogger` connected to :data:`sys.stderr`.
+    """
+    #: Set by detach() once this context has detached from its parent.
+    detached = False
+
+    def __init__(self, config):
+        # `config` is the bootstrap dict supplied by the parent; its keys are
+        # consumed by the _setup_*() methods below.
+        self.config = config
+
+    def _on_broker_exit(self):
+        # Once the broker thread exits, SIGTERM ourselves so the process dies
+        # even if other threads linger. Skipped under profiling, presumably
+        # so profile data can still be written -- TODO confirm.
+        if not self.config['profiling']:
+            os.kill(os.getpid(), signal.SIGTERM)
+
+    def _on_shutdown_msg(self, msg):
+        # Ignore dead messages; only a genuine SHUTDOWN (policy-checked via
+        # has_parent_authority in _setup_master) triggers shutdown.
+        if not msg.is_dead:
+            _v and LOG.debug('shutdown request from context %d', msg.src_id)
+            self.broker.shutdown()
+
+    def _on_parent_disconnect(self):
+        # After a deliberate detach() the parent's disappearance is expected:
+        # forget the parent IDs and keep running. Otherwise it is fatal.
+        if self.detached:
+            mitogen.parent_ids = []
+            mitogen.parent_id = None
+            LOG.info('Detachment complete')
+        else:
+            _v and LOG.debug('parent stream is gone, dying.')
+            self.broker.shutdown()
+
+    def detach(self):
+        """
+        Detach this context from its parent: notify the parent, wait briefly
+        for buffered output to drain, then disconnect the parent stream.
+        """
+        self.detached = True
+        stream = self.router.stream_by_id(mitogen.parent_id)
+        if stream:  # not double-detach()'d
+            os.setsid()
+            self.parent.send_await(Message(handle=DETACHING))
+            LOG.info('Detaching from %r; parent is %s', stream, self.parent)
+            # Poll up to 20 times at 50ms intervals for the stream's output
+            # buffer to drain. NOTE(review): the error text says 2000ms, but
+            # the loop sleeps at most ~1000ms plus defer_sync round-trips.
+            for x in range(20):
+                pending = self.broker.defer_sync(stream.protocol.pending_bytes)
+                if not pending:
+                    break
+                time.sleep(0.05)
+            if pending:
+                LOG.error('Stream had %d bytes after 2000ms', pending)
+            self.broker.defer(stream.on_disconnect, self.broker)
+
+    def _setup_master(self):
+        """
+        Construct the broker, router, master/parent contexts, and the stream
+        connecting this child to its parent.
+        """
+        Router.max_message_size = self.config['max_message_size']
+        if self.config['profiling']:
+            enable_profiling()
+        self.broker = Broker(activate_compat=False)
+        self.router = Router(self.broker)
+        self.router.debug = self.config.get('debug', False)
+        self.router.unidirectional = self.config['unidirectional']
+        self.router.add_handler(
+            fn=self._on_shutdown_msg,
+            handle=SHUTDOWN,
+            policy=has_parent_authority,
+        )
+        self.master = Context(self.router, 0, 'master')
+        parent_id = self.config['parent_ids'][0]
+        if parent_id == 0:
+            # Direct child of the master: parent and master are one context.
+            self.parent = self.master
+        else:
+            self.parent = Context(self.router, parent_id, 'parent')
+
+        # Duplicate the inherited input FD (unbuffered) and close the
+        # original descriptor number.
+        in_fd = self.config.get('in_fd', 100)
+        in_fp = os.fdopen(os.dup(in_fd), 'rb', 0)
+        os.close(in_fd)
+
+        out_fp = os.fdopen(os.dup(self.config.get('out_fd', 1)), 'wb', 0)
+        self.stream = MitogenProtocol.build_stream(
+            self.router,
+            parent_id,
+            local_id=self.config['context_id'],
+            parent_ids=self.config['parent_ids']
+        )
+        self.stream.accept(in_fp, out_fp)
+        self.stream.name = 'parent'
+        # The parent link alone must not keep the broker alive at shutdown.
+        self.stream.receive_side.keep_alive = False
+
+        listen(self.stream, 'disconnect', self._on_parent_disconnect)
+        listen(self.broker, 'exit', self._on_broker_exit)
+
+    def _reap_first_stage(self):
+        try:
+            os.wait()  # Reap first stage.
+        except OSError:
+            pass  # No first stage exists (e.g. fakessh)
+
+    def _setup_logging(self):
+        """
+        Route the root logger through a LogHandler that forwards records to
+        the master context.
+        """
+        self.log_handler = LogHandler(self.master)
+        root = logging.getLogger()
+        root.setLevel(self.config['log_level'])
+        root.handlers = [self.log_handler]
+        if self.config['debug']:
+            enable_debug_logging()
+
+    def _setup_importer(self):
+        """
+        Install the Importer on sys.meta_path: either one supplied in config
+        (e.g. after fork), or a fresh one fed with our own source read from
+        `core_src_fd`.
+        """
+        importer = self.config.get('importer')
+        if importer:
+            importer._install_handler(self.router)
+            importer._context = self.parent
+        else:
+            core_src_fd = self.config.get('core_src_fd', 101)
+            if core_src_fd:
+                fp = os.fdopen(core_src_fd, 'rb', 1)
+                try:
+                    core_src = fp.read()
+                    # Strip "ExternalContext.main()" call from last line.
+                    core_src = b('\n').join(core_src.splitlines()[:-1])
+                finally:
+                    fp.close()
+            else:
+                core_src = None
+
+            importer = Importer(
+                self.router,
+                self.parent,
+                core_src,
+                self.config.get('whitelist', ()),
+                self.config.get('blacklist', ()),
+            )
+
+        self.importer = importer
+        self.router.importer = importer
+        sys.meta_path.insert(0, self.importer)
+
+    def _setup_package(self):
+        """
+        Synthesize the `mitogen` package and alias the running __main__ module
+        as `mitogen.core`, so ordinary imports resolve inside the child.
+        """
+        global mitogen
+        mitogen = imp.new_module('mitogen')
+        mitogen.__package__ = 'mitogen'
+        mitogen.__path__ = []
+        mitogen.__loader__ = self.importer
+        mitogen.main = lambda *args, **kwargs: (lambda func: None)
+        mitogen.core = sys.modules['__main__']
+        mitogen.core.__file__ = 'x/mitogen/core.py'  # For inspect.getsource()
+        mitogen.core.__loader__ = self.importer
+        sys.modules['mitogen'] = mitogen
+        sys.modules['mitogen.core'] = mitogen.core
+        del sys.modules['__main__']
+
+    def _setup_globals(self):
+        # Publish identity information from config as mitogen module globals.
+        mitogen.is_master = False
+        mitogen.__version__ = self.config['version']
+        mitogen.context_id = self.config['context_id']
+        mitogen.parent_ids = self.config['parent_ids'][:]
+        mitogen.parent_id = mitogen.parent_ids[0]
+
+    def _nullify_stdio(self):
+        """
+        Open /dev/null to replace stdio temporarily. In case of odd startup,
+        assume we may be allocated a standard handle.
+        """
+        for stdfd, mode in ((0, os.O_RDONLY), (1, os.O_RDWR), (2, os.O_RDWR)):
+            fd = os.open('/dev/null', mode)
+            if fd != stdfd:
+                os.dup2(fd, stdfd)
+                os.close(fd)
+
+    def _preserve_tty_fp(self):
+        """
+        #481: when stderr is a TTY due to being started via tty_create_child()
+        or hybrid_tty_create_child(), and some privilege escalation tool like
+        prehistoric versions of sudo exec this process over the top of itself,
+        there is nothing left to keep the slave PTY open after we replace our
+        stdio. Therefore if stderr is a TTY, keep around a permanent dup() to
+        avoid receiving SIGHUP.
+        """
+        try:
+            if os.isatty(2):
+                self.reserve_tty_fp = os.fdopen(os.dup(2), 'r+b', 0)
+                set_cloexec(self.reserve_tty_fp.fileno())
+        except OSError:
+            pass
+
+    def _setup_stdio(self):
+        self._preserve_tty_fp()
+        # When sys.stdout was opened by the runtime, overwriting it will not
+        # close FD 1. However when forking from a child that previously used
+        # fdopen(), overwriting it /will/ close FD 1. So we must swallow the
+        # close before IoLogger overwrites FD 1, otherwise its new FD 1 will be
+        # clobbered. Additionally, stdout must be replaced with /dev/null prior
+        # to stdout.close(), since if block buffering was active in the parent,
+        # any pre-fork buffered data will be flushed on close(), corrupting the
+        # connection to the parent.
+        self._nullify_stdio()
+        sys.stdout.close()
+        self._nullify_stdio()
+
+        self.loggers = []
+        for name, fd in (('stdout', 1), ('stderr', 2)):
+            log = IoLoggerProtocol.build_stream(name, fd)
+            self.broker.start_receive(log)
+            self.loggers.append(log)
+
+        # Reopen with line buffering.
+        sys.stdout = os.fdopen(1, 'w', 1)
+
+    def main(self):
+        """
+        Child entry point: run every _setup_*() step, hand the main thread to
+        the Dispatcher, and guarantee broker shutdown on any exit path.
+        """
+        self._setup_master()
+        try:
+            try:
+                self._setup_logging()
+                self._setup_importer()
+                self._reap_first_stage()
+                if self.config.get('setup_package', True):
+                    self._setup_package()
+                self._setup_globals()
+                if self.config.get('setup_stdio', True):
+                    self._setup_stdio()
+
+                self.dispatcher = Dispatcher(self)
+                self.router.register(self.parent, self.stream)
+                self.router._setup_logging()
+
+                _v and LOG.debug('Python version is %s', sys.version)
+                _v and LOG.debug('Parent is context %r (%s); my ID is %r',
+                                 self.parent.context_id, self.parent.name,
+                                 mitogen.context_id)
+                _v and LOG.debug('pid:%r ppid:%r uid:%r/%r, gid:%r/%r host:%r',
+                                 os.getpid(), os.getppid(), os.geteuid(),
+                                 os.getuid(), os.getegid(), os.getgid(),
+                                 socket.gethostname())
+
+                sys.executable = os.environ.pop('ARGV0', sys.executable)
+                _v and LOG.debug('Recovered sys.executable: %r', sys.executable)
+
+                if self.config.get('send_ec2', True):
+                    # Marker telling the parent that bootstrap succeeded.
+                    self.stream.transmit_side.write(b('MITO002\n'))
+                self.broker._py24_25_compat()
+                self.log_handler.uncork()
+                self.dispatcher.run()
+                _v and LOG.debug('ExternalContext.main() normal exit')
+            except KeyboardInterrupt:
+                LOG.debug('KeyboardInterrupt received, exiting gracefully.')
+            except BaseException:
+                LOG.exception('ExternalContext.main() crashed')
+                raise
+        finally:
+            self.broker.shutdown()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/debug.py b/deployment/lib/mitogen-0.2.9/mitogen/debug.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbab550ec71bc5b2b4523bb7d4d84f52414ae867
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/debug.py
@@ -0,0 +1,236 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Basic signal handler for dumping thread stacks.
+"""
+
+import difflib
+import logging
+import os
+import gc
+import signal
+import sys
+import threading
+import time
+import traceback
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+_last = None
+
+
+def enable_evil_interrupts():
+    signal.signal(signal.SIGALRM, (lambda a, b: None))
+    signal.setitimer(signal.ITIMER_REAL, 0.01, 0.01)
+
+
+def disable_evil_interrupts():
+    signal.setitimer(signal.ITIMER_REAL, 0, 0)
+
+
+def _hex(n):
+    return '%08x' % n
+
+
+def get_subclasses(klass):
+    """
+    Rather than statically import every interesting subclass, forcing it all to
+    be transferred and potentially disrupting the debugged environment,
+    enumerate only those loaded in memory. Also returns the original class.
+    """
+    stack = [klass]
+    seen = set()
+    while stack:
+        klass = stack.pop()
+        seen.add(klass)
+        stack.extend(klass.__subclasses__())
+    return seen
+
+
+def get_routers():
+    return dict(
+        (_hex(id(router)), router)
+        for klass in get_subclasses(mitogen.core.Router)
+        for router in gc.get_referrers(klass)
+        if isinstance(router, mitogen.core.Router)
+    )
+
+
+def get_router_info():
+    return {
+        'routers': dict(
+            (id_, {
+                'id': id_,
+                'streams': len(set(router._stream_by_id.values())),
+                'contexts': len(set(router._context_by_id.values())),
+                'handles': len(router._handle_map),
+            })
+            for id_, router in get_routers().items()
+        )
+    }
+
+
+def get_stream_info(router_id):
+    router = get_routers().get(router_id)
+    return {
+        'streams': dict(
+            (_hex(id(stream)), ({
+                'name': stream.name,
+                'remote_id': stream.remote_id,
+                'sent_module_count': len(getattr(stream, 'sent_modules', [])),
+                'routes': sorted(getattr(stream, 'routes', [])),
+                'type': type(stream).__module__,
+            }))
+            for via_id, stream in router._stream_by_id.items()
+        )
+    }
+
+
+def format_stacks():
+    name_by_id = dict(
+        (t.ident, t.name)
+        for t in threading.enumerate()
+    )
+
+    l = ['', '']
+    for threadId, stack in sys._current_frames().items():
+        l += ["# PID %d ThreadID: (%s) %s; %r" % (
+            os.getpid(),
+            name_by_id.get(threadId, '<no name>'),
+            threadId,
+            stack,
+        )]
+        #stack = stack.f_back.f_back
+
+        for filename, lineno, name, line in traceback.extract_stack(stack):
+            l += [
+                'File: "%s", line %d, in %s' % (
+                    filename,
+                    lineno,
+                    name
+                )
+            ]
+            if line:
+                l += ['    ' + line.strip()]
+        l += ['']
+
+    l += ['', '']
+    return '\n'.join(l)
+
+
+def get_snapshot():
+    global _last
+
+    s = format_stacks()
+    snap = s
+    if _last:
+        snap += '\n'
+        diff = list(difflib.unified_diff(
+            a=_last.splitlines(),
+            b=s.splitlines(),
+            fromfile='then',
+            tofile='now'
+        ))
+
+        if diff:
+            snap += '\n'.join(diff) + '\n'
+        else:
+            snap += '(no change since last time)\n'
+    _last = s
+    return snap
+
+
+def _handler(*_):
+    fp = open('/dev/tty', 'w', 1)
+    fp.write(get_snapshot())
+    fp.close()
+
+
+def install_handler():
+    signal.signal(signal.SIGUSR2, _handler)
+
+
+def _logging_main(secs):
+    while True:
+        time.sleep(secs)
+        LOG.info('PERIODIC THREAD DUMP\n\n%s', get_snapshot())
+
+
+def dump_to_logger(secs=5):
+    th = threading.Thread(
+        target=_logging_main,
+        kwargs={'secs': secs},
+        name='mitogen.debug.dump_to_logger',
+    )
+    th.setDaemon(True)
+    th.start()
+
+
+class ContextDebugger(object):
+    @classmethod
+    @mitogen.core.takes_econtext
+    def _configure_context(cls, econtext):
+        mitogen.parent.upgrade_router(econtext)
+        econtext.debugger = cls(econtext.router)
+
+    def __init__(self, router):
+        self.router = router
+        self.router.add_handler(
+            func=self._on_debug_msg,
+            handle=mitogen.core.DEBUG,
+            persist=True,
+            policy=mitogen.core.has_parent_authority,
+        )
+        mitogen.core.listen(router, 'register', self._on_stream_register)
+        LOG.debug('Context debugging configured.')
+
+    def _on_stream_register(self, context, stream):
+        LOG.debug('_on_stream_register: sending configure() to %r', stream)
+        context.call_async(ContextDebugger._configure_context)
+
+    def _on_debug_msg(self, msg):
+        if msg != mitogen.core._DEAD:
+            threading.Thread(
+                target=self._handle_debug_msg,
+                name='ContextDebuggerHandler',
+                args=(msg,)
+            ).start()
+
+    def _handle_debug_msg(self, msg):
+        try:
+            method, args, kwargs = msg.unpickle()
+            msg.reply(getattr(self, method)(*args, **kwargs))
+        except Exception:
+            e = sys.exc_info()[1]
+            msg.reply(mitogen.core.CallError(e))
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/doas.py b/deployment/lib/mitogen-0.2.9/mitogen/doas.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b212b9bbf61779cfe2ffe9dcac4af44097547fa
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/doas.py
@@ -0,0 +1,142 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+import re
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+password_incorrect_msg = 'doas password is incorrect'
+password_required_msg = 'doas password is required'
+
+
+class PasswordError(mitogen.core.StreamError):
+    pass
+
+
+class Options(mitogen.parent.Options):
+    username = u'root'
+    password = None
+    doas_path = 'doas'
+    password_prompt = u'Password:'
+    incorrect_prompts = (
+        u'doas: authentication failed',  # slicer69/doas
+        u'doas: Authorization failed',   # openbsd/src
+    )
+
+    def __init__(self, username=None, password=None, doas_path=None,
+                 password_prompt=None, incorrect_prompts=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+        if username is not None:
+            self.username = mitogen.core.to_text(username)
+        if password is not None:
+            self.password = mitogen.core.to_text(password)
+        if doas_path is not None:
+            self.doas_path = doas_path
+        if password_prompt is not None:
+            self.password_prompt = mitogen.core.to_text(password_prompt)
+        if incorrect_prompts is not None:
+            self.incorrect_prompts = [
+                mitogen.core.to_text(p)
+                for p in incorrect_prompts
+            ]
+
+
+class BootstrapProtocol(mitogen.parent.RegexProtocol):
+    password_sent = False
+
+    def setup_patterns(self, conn):
+        prompt_pattern = re.compile(
+            re.escape(conn.options.password_prompt).encode('utf-8'),
+            re.I
+        )
+        incorrect_prompt_pattern = re.compile(
+            u'|'.join(
+                re.escape(s)
+                for s in conn.options.incorrect_prompts
+            ).encode('utf-8'),
+            re.I
+        )
+
+        self.PATTERNS = [
+            (incorrect_prompt_pattern, type(self)._on_incorrect_password),
+        ]
+        self.PARTIAL_PATTERNS = [
+            (prompt_pattern, type(self)._on_password_prompt),
+        ]
+
+    def _on_incorrect_password(self, line, match):
+        if self.password_sent:
+            self.stream.conn._fail_connection(
+                PasswordError(password_incorrect_msg)
+            )
+
+    def _on_password_prompt(self, line, match):
+        if self.stream.conn.options.password is None:
+            self.stream.conn._fail_connection(
+                PasswordError(password_required_msg)
+            )
+            return
+
+        if self.password_sent:
+            self.stream.conn._fail_connection(
+                PasswordError(password_incorrect_msg)
+            )
+            return
+
+        LOG.debug('sending password')
+        self.stream.transmit_side.write(
+            (self.stream.conn.options.password + '\n').encode('utf-8')
+        )
+        self.password_sent = True
+
+
+class Connection(mitogen.parent.Connection):
+    options_class = Options
+    diag_protocol_class = BootstrapProtocol
+
+    create_child = staticmethod(mitogen.parent.hybrid_tty_create_child)
+    child_is_immediate_subprocess = False
+
+    def _get_name(self):
+        return u'doas.' + self.options.username
+
+    def stderr_stream_factory(self):
+        stream = super(Connection, self).stderr_stream_factory()
+        stream.protocol.setup_patterns(self)
+        return stream
+
+    def get_boot_command(self):
+        bits = [self.options.doas_path, '-u', self.options.username, '--']
+        return bits + super(Connection, self).get_boot_command()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/docker.py b/deployment/lib/mitogen-0.2.9/mitogen/docker.py
new file mode 100644
index 0000000000000000000000000000000000000000..48848c89348e43d68987cbcaa36be801a6402488
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/docker.py
@@ -0,0 +1,83 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Options(mitogen.parent.Options):
+    container = None
+    image = None
+    username = None
+    docker_path = u'docker'
+
+    def __init__(self, container=None, image=None, docker_path=None,
+                 username=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+        assert container or image
+        if container:
+            self.container = mitogen.core.to_text(container)
+        if image:
+            self.image = mitogen.core.to_text(image)
+        if docker_path:
+            self.docker_path = mitogen.core.to_text(docker_path)
+        if username:
+            self.username = mitogen.core.to_text(username)
+
+
+class Connection(mitogen.parent.Connection):
+    options_class = Options
+    child_is_immediate_subprocess = False
+
+    # TODO: better way of capturing errors such as "No such container."
+    create_child_args = {
+        'merge_stdio': True
+    }
+
+    def _get_name(self):
+        return u'docker.' + (self.options.container or self.options.image)
+
+    def get_boot_command(self):
+        args = ['--interactive']
+        if self.options.username:
+            args += ['--user=' + self.options.username]
+
+        bits = [self.options.docker_path]
+        if self.options.container:
+            bits += ['exec'] + args + [self.options.container]
+        elif self.options.image:
+            bits += ['run'] + args + ['--rm', self.options.image]
+
+        return bits + super(Connection, self).get_boot_command()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/fakessh.py b/deployment/lib/mitogen-0.2.9/mitogen/fakessh.py
new file mode 100644
index 0000000000000000000000000000000000000000..e62cf84a71852011fac1da9ed2dbad79ceba120b
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/fakessh.py
@@ -0,0 +1,456 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+:mod:`mitogen.fakessh` is a stream implementation that starts a subprocess with
+its environment modified such that ``PATH`` searches for `ssh` return a Mitogen
+implementation of SSH. When invoked, this implementation arranges for the
+command line supplied by the caller to be executed in a remote context, reusing
+the parent context's (possibly proxied) connection to that remote context.
+
+This allows tools like `rsync` and `scp` to transparently reuse the connections
+and tunnels already established by the host program to connect to a target
+machine, without wasteful redundant SSH connection setup, 3-way handshakes, or
+firewall hopping configurations, and enables these tools to be used in
+impossible scenarios, such as over `sudo` with ``requiretty`` enabled.
+
+The fake `ssh` command source is written to a temporary file on disk, and
+consists of a copy of the :py:mod:`mitogen.core` source code (just like any
+other child context), with a line appended to cause it to connect back to the
+host process over an FD it inherits. As there is no reliance on an existing
+filesystem file, it is possible for child contexts to use fakessh.
+
+As a consequence of connecting back through an inherited FD, only one SSH
+invocation is possible, which is fine for tools like `rsync`, however in future
+this restriction will be lifted.
+
+Sequence:
+
+    1. ``fakessh`` Context and Stream created by parent context. The stream's
+       buffer has a :py:func:`_fakessh_main` :py:data:`CALL_FUNCTION
+       <mitogen.core.CALL_FUNCTION>` enqueued.
+    2. Target program (`rsync/scp/sftp`) invoked, which internally executes
+       `ssh` from ``PATH``.
+    3. :py:mod:`mitogen.core` bootstrap begins, recovers the stream FD
+       inherited via the target program, established itself as the fakessh
+       context.
+    4. :py:func:`_fakessh_main` :py:data:`CALL_FUNCTION
+       <mitogen.core.CALL_FUNCTION>` is read by fakessh context,
+
+        a. sets up :py:class:`IoPump` for stdio, registers
+           stdin_handle for local context.
+        b. Enqueues :py:data:`CALL_FUNCTION <mitogen.core.CALL_FUNCTION>` for
+           :py:func:`_start_slave` invoked in target context,
+
+            i. the program from the `ssh` command line is started
+            ii. sets up :py:class:`IoPump` for `ssh` command line process's
+                stdio pipes
+            iii. returns `(control_handle, stdin_handle)` to
+                 :py:func:`_fakessh_main`
+
+    5. :py:func:`_fakessh_main` receives control/stdin handles from from
+       :py:func:`_start_slave`,
+
+        a. registers remote's stdin_handle with local :py:class:`IoPump`.
+        b. sends `("start", local_stdin_handle)` to remote's control_handle
+        c. registers local :py:class:`IoPump` with
+           :py:class:`mitogen.core.Broker`.
+        d. loops waiting for `local stdout closed && remote stdout closed`
+
+    6. :py:func:`_start_slave` control channel receives `("start", stdin_handle)`,
+
+        a. registers remote's stdin_handle with local :py:class:`IoPump`
+        b. registers local :py:class:`IoPump` with
+           :py:class:`mitogen.core.Broker`.
+        c. loops waiting for `local stdout closed && remote stdout closed`
+"""
+
+import getopt
+import inspect
+import os
+import shutil
+import socket
+import subprocess
+import sys
+import tempfile
+import threading
+
+import mitogen.core
+import mitogen.master
+import mitogen.parent
+
+from mitogen.core import LOG, IOLOG
+
+
+SSH_GETOPTS = (
+    "1246ab:c:e:fgi:kl:m:no:p:qstvx"
+    "ACD:E:F:I:KL:MNO:PQ:R:S:TVw:W:XYy"
+)
+
+_mitogen = None
+
+
+class IoPump(mitogen.core.Protocol):
+    _output_buf = ''
+    _closed = False
+
+    def __init__(self, broker):
+        self._broker = broker
+
+    def write(self, s):
+        self._output_buf += s
+        self._broker._start_transmit(self)
+
+    def close(self):
+        self._closed = True
+        # If local process hasn't exitted yet, ensure its write buffer is
+        # drained before lazily triggering disconnect in on_transmit.
+        if self.transmit_side.fp.fileno() is not None:
+            self._broker._start_transmit(self)
+
+    def on_shutdown(self, stream, broker):
+        self.close()
+
+    def on_transmit(self, stream, broker):
+        written = self.transmit_side.write(self._output_buf)
+        IOLOG.debug('%r.on_transmit() -> len %r', self, written)
+        if written is None:
+            self.on_disconnect(broker)
+        else:
+            self._output_buf = self._output_buf[written:]
+
+        if not self._output_buf:
+            broker._stop_transmit(self)
+            if self._closed:
+                self.on_disconnect(broker)
+
+    def on_receive(self, stream, broker):
+        s = stream.receive_side.read()
+        IOLOG.debug('%r.on_receive() -> len %r', self, len(s))
+        if s:
+            mitogen.core.fire(self, 'receive', s)
+        else:
+            self.on_disconnect(broker)
+
+    def __repr__(self):
+        return 'IoPump(%r, %r)' % (
+            self.receive_side.fp.fileno(),
+            self.transmit_side.fp.fileno(),
+        )
+
+
+class Process(object):
+    """
+    Manages the lifetime and pipe connections of the SSH command running in the
+    slave.
+    """
+    def __init__(self, router, stdin, stdout, proc=None):
+        self.router = router
+        self.stdin = stdin
+        self.stdout = stdout
+        self.proc = proc
+        self.control_handle = router.add_handler(self._on_control)
+        self.stdin_handle = router.add_handler(self._on_stdin)
+        self.pump = IoPump.build_stream(router.broker)
+        self.pump.accept(stdin, stdout)
+        self.stdin = None
+        self.control = None
+        self.wake_event = threading.Event()
+
+        mitogen.core.listen(self.pump, 'disconnect', self._on_pump_disconnect)
+        mitogen.core.listen(self.pump, 'receive', self._on_pump_receive)
+
+        if proc:
+            pmon = mitogen.parent.ProcessMonitor.instance()
+            pmon.add(proc.pid, self._on_proc_exit)
+
+    def __repr__(self):
+        return 'Process(%r, %r)' % (self.stdin, self.stdout)
+
+    def _on_proc_exit(self, status):
+        LOG.debug('%r._on_proc_exit(%r)', self, status)
+        self.control.put(('exit', status))
+
+    def _on_stdin(self, msg):
+        if msg.is_dead:
+            IOLOG.debug('%r._on_stdin() -> %r', self, data)
+            self.pump.protocol.close()
+            return
+
+        data = msg.unpickle()
+        IOLOG.debug('%r._on_stdin() -> len %d', self, len(data))
+        self.pump.protocol.write(data)
+
+    def _on_control(self, msg):
+        if not msg.is_dead:
+            command, arg = msg.unpickle(throw=False)
+            LOG.debug('%r._on_control(%r, %s)', self, command, arg)
+
+            func = getattr(self, '_on_%s' % (command,), None)
+            if func:
+                return func(msg, arg)
+
+            LOG.warning('%r: unknown command %r', self, command)
+
+    def _on_start(self, msg, arg):
+        dest = mitogen.core.Context(self.router, msg.src_id)
+        self.control = mitogen.core.Sender(dest, arg[0])
+        self.stdin = mitogen.core.Sender(dest, arg[1])
+        self.router.broker.start_receive(self.pump)
+
+    def _on_exit(self, msg, arg):
+        LOG.debug('on_exit: proc = %r', self.proc)
+        if self.proc:
+            self.proc.terminate()
+        else:
+            self.router.broker.shutdown()
+
+    def _on_pump_receive(self, s):
+        IOLOG.info('%r._on_pump_receive(len %d)', self, len(s))
+        self.stdin.put(s)
+
+    def _on_pump_disconnect(self):
+        LOG.debug('%r._on_pump_disconnect()', self)
+        mitogen.core.fire(self, 'disconnect')
+        self.stdin.close()
+        self.wake_event.set()
+
+    def start_master(self, stdin, control):
+        self.stdin = stdin
+        self.control = control
+        control.put(('start', (self.control_handle, self.stdin_handle)))
+        self.router.broker.start_receive(self.pump)
+
+    def wait(self):
+        while not self.wake_event.isSet():
+            # Timeout is used so that sleep is interruptible, as blocking
+            # variants of libc thread operations cannot be interrupted e.g. via
+            # KeyboardInterrupt. isSet() test and wait() are separate since in
+            # <2.7 wait() always returns None.
+            self.wake_event.wait(0.1)
+
+
+@mitogen.core.takes_router
+def _start_slave(src_id, cmdline, router):
+    """
+    This runs in the target context, it is invoked by _fakessh_main running in
+    the fakessh context immediately after startup. It starts the slave process
+    (the the point where it has a stdin_handle to target but not stdout_chan to
+    write to), and waits for main to.
+    """
+    LOG.debug('_start_slave(%r, %r)', router, cmdline)
+
+    proc = subprocess.Popen(
+        cmdline,
+        # SSH server always uses user's shell.
+        shell=True,
+        # SSH server always executes new commands in the user's HOME.
+        cwd=os.path.expanduser('~'),
+
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+    )
+
+    process = Process(router, proc.stdin, proc.stdout, proc)
+    return process.control_handle, process.stdin_handle
+
+
+#
+# SSH client interface.
+#
+
+
+def exit():
+    _mitogen.broker.shutdown()
+
+
+def die(msg, *args):
+    if args:
+        msg %= args
+    sys.stderr.write('%s\n' % (msg,))
+    exit()
+
+
+def parse_args():
+    hostname = None
+    remain = sys.argv[1:]
+    allopts = []
+    restarted = 0
+
+    while remain and restarted < 2:
+        opts, args = getopt.getopt(remain, SSH_GETOPTS)
+        remain = remain[:]  # getopt bug!
+        allopts += opts
+        if not args:
+            break
+
+        if not hostname:
+            hostname = args.pop(0)
+            remain = remain[remain.index(hostname) + 1:]
+
+        restarted += 1
+
+    return hostname, allopts, args
+
+
+@mitogen.core.takes_econtext
+def _fakessh_main(dest_context_id, econtext):
+    hostname, opts, args = parse_args()
+    if not hostname:
+        die('Missing hostname')
+
+    subsystem = False
+    for opt, optarg in opts:
+        if opt == '-s':
+            subsystem = True
+        else:
+            LOG.debug('Warning option %s %s is ignored.', opt, optarg)
+
+    LOG.debug('hostname: %r', hostname)
+    LOG.debug('opts: %r', opts)
+    LOG.debug('args: %r', args)
+
+    if subsystem:
+        die('-s <subsystem> is not yet supported')
+
+    if not args:
+        die('fakessh: login mode not supported and no command specified')
+
+    dest = mitogen.parent.Context(econtext.router, dest_context_id)
+
+    # Even though SSH receives an argument vector, it still cats the vector
+    # together before sending to the server, the server just uses /bin/sh -c to
+    # run the command. We must remain puke-for-puke compatible.
+    control_handle, stdin_handle = dest.call(_start_slave,
+        mitogen.context_id, ' '.join(args))
+
+    LOG.debug('_fakessh_main: received control_handle=%r, stdin_handle=%r',
+              control_handle, stdin_handle)
+
+    process = Process(econtext.router,
+                      stdin=os.fdopen(1, 'w+b', 0),
+                      stdout=os.fdopen(0, 'r+b', 0))
+    process.start_master(
+        stdin=mitogen.core.Sender(dest, stdin_handle),
+        control=mitogen.core.Sender(dest, control_handle),
+    )
+    process.wait()
+    process.control.put(('exit', None))
+
+
+def _get_econtext_config(context, sock2):
+    parent_ids = mitogen.parent_ids[:]
+    parent_ids.insert(0, mitogen.context_id)
+    return {
+        'context_id': context.context_id,
+        'core_src_fd': None,
+        'debug': getattr(context.router, 'debug', False),
+        'in_fd': sock2.fileno(),
+        'log_level': mitogen.parent.get_log_level(),
+        'max_message_size': context.router.max_message_size,
+        'out_fd': sock2.fileno(),
+        'parent_ids': parent_ids,
+        'profiling': getattr(context.router, 'profiling', False),
+        'unidirectional': getattr(context.router, 'unidirectional', False),
+        'setup_stdio': False,
+        'version': mitogen.__version__,
+    }
+
+
+#
+# Public API.
+#
+
+@mitogen.core.takes_econtext
+@mitogen.core.takes_router
+def run(dest, router, args, deadline=None, econtext=None):
+    """
+    Run the command specified by `args` such that ``PATH`` searches for SSH by
+    the command will cause its attempt to use SSH to execute a remote program
+    to be redirected to use mitogen to execute that program using the context
+    `dest` instead.
+
+    :param list args:
+        Argument vector.
+    :param mitogen.core.Context dest:
+        The destination context to execute the SSH command line in.
+
+    :param mitogen.core.Router router:
+
+    :param list[str] args:
+        Command line arguments for local program, e.g.
+        ``['rsync', '/tmp', 'remote:/tmp']``
+
+    :returns:
+        Exit status of the child process.
+    """
+    if econtext is not None:
+        mitogen.parent.upgrade_router(econtext)
+
+    context_id = router.allocate_id()
+    fakessh = mitogen.parent.Context(router, context_id)
+    fakessh.name = u'fakessh.%d' % (context_id,)
+
+    sock1, sock2 = socket.socketpair()
+
+    stream = mitogen.core.Stream(router, context_id)
+    stream.name = u'fakessh'
+    stream.accept(sock1, sock1)
+    router.register(fakessh, stream)
+
+    # Held in socket buffer until process is booted.
+    fakessh.call_async(_fakessh_main, dest.context_id)
+
+    tmp_path = tempfile.mkdtemp(prefix='mitogen_fakessh')
+    try:
+        ssh_path = os.path.join(tmp_path, 'ssh')
+        fp = open(ssh_path, 'w')
+        try:
+            fp.write('#!%s\n' % (mitogen.parent.get_sys_executable(),))
+            fp.write(inspect.getsource(mitogen.core))
+            fp.write('\n')
+            fp.write('ExternalContext(%r).main()\n' % (
+                _get_econtext_config(context, sock2),
+            ))
+        finally:
+            fp.close()
+
+        os.chmod(ssh_path, int('0755', 8))
+        env = os.environ.copy()
+        env.update({
+            'PATH': '%s:%s' % (tmp_path, env.get('PATH', '')),
+            'ARGV0': mitogen.parent.get_sys_executable(),
+            'SSH_PATH': ssh_path,
+        })
+
+        proc = subprocess.Popen(args, env=env)
+        return proc.wait()
+    finally:
+        shutil.rmtree(tmp_path)
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/fork.py b/deployment/lib/mitogen-0.2.9/mitogen/fork.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0c2d7e7c74c6a6222eea0f01b945100b07320d0
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/fork.py
@@ -0,0 +1,250 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import errno
+import logging
+import os
+import random
+import sys
+import threading
+import traceback
+
+import mitogen.core
+import mitogen.parent
+from mitogen.core import b
+
+
+LOG = logging.getLogger(__name__)
+
+# Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up
+# interpreter state. So 2.4/2.5 interpreters start .local() contexts for
+# isolation instead. Since we don't have any crazy memory sharing problems to
+# avoid, there is no virginal fork parent either. The child is started directly
+# from the login/become process. In future this will be default everywhere,
+# fork is brainwrong from the stone age.
+FORK_SUPPORTED = sys.version_info >= (2, 6)
+
+
+class Error(mitogen.core.StreamError):
+    """
+    Raised when a :mod:`mitogen.fork` connection cannot be constructed, e.g.
+    when :data:`FORK_SUPPORTED` is false on pre-2.6 interpreters.
+    """
+    pass
+
+
+def fixup_prngs():
+    """
+    Add 256 bits of /dev/urandom to OpenSSL's PRNG in the child, and re-seed
+    the random package with the same data.
+    """
+    # Fresh entropy: fork() duplicated the parent's PRNG state byte-for-byte.
+    s = os.urandom(256 // 8)
+    random.seed(s)
+    # Only reseed OpenSSL if the ssl module was already imported; checking
+    # sys.modules avoids importing ssl as a side effect here.
+    if 'ssl' in sys.modules:
+        sys.modules['ssl'].RAND_add(s, 75.0)
+
+
+def reset_logging_framework():
+    """
+    After fork, ensure any logging.Handler locks are recreated, as a variety of
+    threads in the parent may have been using the logging package at the moment
+    of fork.
+
+    It is not possible to solve this problem in general; see :gh:issue:`150`
+    for a full discussion.
+    """
+    # logging._lock is a private CPython attribute; replacing it discards any
+    # lock state a parent thread may have held at the instant of fork.
+    logging._lock = threading.RLock()
+
+    # The root logger does not appear in the loggerDict.
+    logging.Logger.manager.loggerDict = {}
+    logging.getLogger().handlers = []
+
+
+def on_fork():
+    """
+    Should be called by any program integrating Mitogen each time the process
+    is forked, in the context of the new child.
+    """
+    reset_logging_framework()  # Must be first!
+    fixup_prngs()
+    # Recreate fork-unsafe synchronization primitives owned by core classes.
+    mitogen.core.Latch._on_fork()
+    mitogen.core.Side._on_fork()
+    mitogen.core.ExternalContext.service_stub_lock = threading.Lock()
+
+    # mitogen.service may never have been imported; only reset its pool lock
+    # when the module is actually present in sys.modules.
+    mitogen__service = sys.modules.get('mitogen.service')
+    if mitogen__service:
+        mitogen__service._pool_lock = threading.Lock()
+
+
+def handle_child_crash():
+    """
+    Respond to _child_main() crashing by ensuring the relevant exception is
+    logged to /dev/tty.
+    """
+    tty = open('/dev/tty', 'wb')
+    tty.write('\n\nFORKED CHILD PID %d CRASHED\n%s\n\n' % (
+        os.getpid(),
+        traceback.format_exc(),
+    ))
+    tty.close()
+    os._exit(1)
+
+
+def _convert_exit_status(status):
+    """
+    Convert a :func:`os.waitpid`-style exit status to a :mod:`subprocess` style
+    exit status.
+    """
+    if os.WIFEXITED(status):
+        return os.WEXITSTATUS(status)
+    elif os.WIFSIGNALED(status):
+        return -os.WTERMSIG(status)
+    elif os.WIFSTOPPED(status):
+        return -os.WSTOPSIG(status)
+
+
+class Process(mitogen.parent.Process):
+    def poll(self):
+        try:
+            pid, status = os.waitpid(self.pid, os.WNOHANG)
+        except OSError:
+            e = sys.exc_info()[1]
+            if e.args[0] == errno.ECHILD:
+                LOG.warn('%r: waitpid(%r) produced ECHILD', self, self.pid)
+                return
+            raise
+
+        if not pid:
+            return
+        return _convert_exit_status(status)
+
+
+class Options(mitogen.parent.Options):
+    #: Reference to the importer, if any, recovered from the parent.
+    importer = None
+
+    #: User-supplied function for cleaning up child process state.
+    on_fork = None
+
+    def __init__(self, old_router, max_message_size, on_fork=None, debug=False,
+                 profiling=False, unidirectional=False, on_start=None,
+                 name=None):
+        if not FORK_SUPPORTED:
+            raise Error(self.python_version_msg)
+
+        # fork method only supports a tiny subset of options.
+        super(Options, self).__init__(
+            max_message_size=max_message_size, debug=debug,
+            profiling=profiling, unidirectional=unidirectional, name=name,
+        )
+        self.on_fork = on_fork
+        self.on_start = on_start
+
+        responder = getattr(old_router, 'responder', None)
+        if isinstance(responder, mitogen.parent.ModuleForwarder):
+            self.importer = responder.importer
+
+
+class Connection(mitogen.parent.Connection):
+    """
+    Construct a child context by fork() from the current process,
+    communicating over a socketpair instead of stdio pipes.
+    """
+    options_class = Options
+    child_is_immediate_subprocess = True
+
+    python_version_msg = (
+        "The mitogen.fork method is not supported on Python versions "
+        "prior to 2.6, since those versions made no attempt to repair "
+        "critical interpreter state following a fork. Please use the "
+        "local() method instead."
+    )
+
+    name_prefix = u'fork'
+
+    def start_child(self):
+        # The parent half of the socketpair serves as both stdin and stdout
+        # of the child Process object.
+        parentfp, childfp = mitogen.parent.create_socketpair()
+        pid = os.fork()
+        if pid:
+            childfp.close()
+            return Process(pid, stdin=parentfp, stdout=parentfp)
+        else:
+            parentfp.close()
+            # Never returns: the child terminates via os._exit() in
+            # _child_main() or handle_child_crash().
+            self._wrap_child_main(childfp)
+
+    def _wrap_child_main(self, childfp):
+        # A crash in the child must not unwind back into parent code that was
+        # duplicated by fork(); report to /dev/tty and _exit(1) instead.
+        try:
+            self._child_main(childfp)
+        except BaseException:
+            handle_child_crash()
+
+    def get_econtext_config(self):
+        config = super(Connection, self).get_econtext_config()
+        # The forked child inherits the parent's loaded mitogen.core, so no
+        # bootstrap source is streamed in over a descriptor.
+        config['core_src_fd'] = None
+        config['importer'] = self.options.importer
+        config['send_ec2'] = False
+        config['setup_package'] = False
+        if self.options.on_start:
+            config['on_start'] = self.options.on_start
+        return config
+
+    def _child_main(self, childfp):
+        # Runs in the forked child: repair fork-broken global state, wire the
+        # socketpair to the descriptors ExternalContext.main() expects, then
+        # run the context to completion and _exit().
+        on_fork()
+        if self.options.on_fork:
+            self.options.on_fork()
+        mitogen.core.set_block(childfp.fileno())
+
+        # NOTE(review): 'MITO002' appears to be the stream handshake preamble
+        # consumed by the parent side -- confirm against core stream setup.
+        childfp.send(b('MITO002\n'))
+
+        # Expected by the ExternalContext.main().
+        os.dup2(childfp.fileno(), 1)
+        os.dup2(childfp.fileno(), 100)
+
+        # Overwritten by ExternalContext.main(); we must replace the
+        # parent-inherited descriptors that were closed by Side._on_fork() to
+        # avoid ExternalContext.main() accidentally allocating new files over
+        # the standard handles.
+        os.dup2(childfp.fileno(), 0)
+
+        # Avoid corrupting the stream on fork crash by dupping /dev/null over
+        # stderr. Instead, handle_child_crash() uses /dev/tty to log errors.
+        devnull = os.open('/dev/null', os.O_WRONLY)
+        if devnull != 2:
+            os.dup2(devnull, 2)
+            os.close(devnull)
+
+        # If we're unlucky, childfp.fileno() may coincidentally be one of our
+        # desired FDs. In that case closing it breaks ExternalContext.main().
+        if childfp.fileno() not in (0, 1, 100):
+            childfp.close()
+
+        mitogen.core.IOLOG.setLevel(logging.INFO)
+
+        try:
+            try:
+                mitogen.core.ExternalContext(self.get_econtext_config()).main()
+            except Exception:
+                # TODO: report exception somehow.
+                os._exit(72)
+        finally:
+            # Don't trigger atexit handlers, they were copied from the parent.
+            os._exit(0)
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/jail.py b/deployment/lib/mitogen-0.2.9/mitogen/jail.py
new file mode 100644
index 0000000000000000000000000000000000000000..4da7eb0df25a82916dc41e305170ccb80283372b
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/jail.py
@@ -0,0 +1,65 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import mitogen.core
+import mitogen.parent
+
+
+class Options(mitogen.parent.Options):
+    """
+    Connection options for the FreeBSD jail method.
+    """
+    #: Name of the target jail.
+    container = None
+    #: Optional user to run as inside the jail (passed as 'jexec -U').
+    username = None
+    #: Path to the jexec binary on the host.
+    jexec_path = u'/usr/sbin/jexec'
+
+    def __init__(self, container, jexec_path=None, username=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+        self.container = mitogen.core.to_text(container)
+        if username:
+            self.username = mitogen.core.to_text(username)
+        if jexec_path:
+            # NOTE(review): unlike container/username, jexec_path is not
+            # passed through to_text() -- confirm whether that is intended.
+            self.jexec_path = jexec_path
+
+
+class Connection(mitogen.parent.Connection):
+    """
+    Establish a context inside a FreeBSD jail using the jexec tool.
+    """
+    options_class = Options
+
+    child_is_immediate_subprocess = False
+    create_child_args = {
+        'merge_stdio': True
+    }
+
+    def _get_name(self):
+        # e.g. u'jail.www1' -- labels the context in logs.
+        return u'jail.' + self.options.container
+
+    def get_boot_command(self):
+        # Builds: jexec [-U username] <jail> <inherited python bootstrap...>
+        bits = [self.options.jexec_path]
+        if self.options.username:
+            bits += ['-U', self.options.username]
+        bits += [self.options.container]
+        return bits + super(Connection, self).get_boot_command()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/kubectl.py b/deployment/lib/mitogen-0.2.9/mitogen/kubectl.py
new file mode 100644
index 0000000000000000000000000000000000000000..374ab7470c174e744c3fb6097cd0b7325ac7c9e0
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/kubectl.py
@@ -0,0 +1,67 @@
+# Copyright 2018, Yannig Perre
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import mitogen.core
+import mitogen.parent
+
+
+class Options(mitogen.parent.Options):
+    """
+    Connection options for the kubectl method.
+    """
+    #: Name of the target pod (required).
+    pod = None
+    #: Path or name of the kubectl binary.
+    kubectl_path = 'kubectl'
+    #: Extra arguments inserted before the 'exec' subcommand.
+    kubectl_args = None
+
+    def __init__(self, pod, kubectl_path=None, kubectl_args=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+        # NOTE(review): a bare assert is stripped under 'python -O'; an
+        # explicit ValueError would survive optimized mode.
+        assert pod
+        self.pod = pod
+        if kubectl_path:
+            self.kubectl_path = kubectl_path
+        self.kubectl_args = kubectl_args or []
+
+
+class Connection(mitogen.parent.Connection):
+    """
+    Establish a context inside a Kubernetes pod via 'kubectl exec'.
+    """
+    options_class = Options
+    child_is_immediate_subprocess = True
+
+    # TODO: better way of capturing errors such as "No such container."
+    create_child_args = {
+        'merge_stdio': True
+    }
+
+    def _get_name(self):
+        # Includes any extra kubectl args so differently-configured contexts
+        # remain distinguishable in logs.
+        return u'kubectl.%s%s' % (self.options.pod, self.options.kubectl_args)
+
+    def get_boot_command(self):
+        # NOTE(review): 'exec -it' requests an interactive TTY -- confirm
+        # this behaves when the controller itself has no TTY attached.
+        bits = [
+            self.options.kubectl_path
+        ] + self.options.kubectl_args + [
+            'exec', '-it', self.options.pod
+        ]
+        return bits + ["--"] + super(Connection, self).get_boot_command()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/lxc.py b/deployment/lib/mitogen-0.2.9/mitogen/lxc.py
new file mode 100644
index 0000000000000000000000000000000000000000..a86ce5f0fbd75aa35e1c663855853e9d2f0d957b
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/lxc.py
@@ -0,0 +1,74 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import mitogen.core
+import mitogen.parent
+
+
+class Options(mitogen.parent.Options):
+    """
+    Connection options for the classic LXC CLI method.
+    """
+    #: Name of the target container.
+    container = None
+    #: Path to the lxc-attach binary.
+    lxc_attach_path = 'lxc-attach'
+
+    def __init__(self, container, lxc_attach_path=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+        self.container = container
+        if lxc_attach_path:
+            self.lxc_attach_path = lxc_attach_path
+
+
+class Connection(mitogen.parent.Connection):
+    """
+    Establish a context inside a classic LXC container via lxc-attach.
+    """
+    options_class = Options
+
+    child_is_immediate_subprocess = False
+    create_child_args = {
+        # If lxc-attach finds any of stdin, stdout, stderr connected to a TTY,
+        # to prevent input injection it creates a proxy pty, forcing all IO to
+        # be buffered in <4KiB chunks. So ensure stderr is also routed to the
+        # socketpair.
+        'merge_stdio': True
+    }
+
+    eof_error_hint = (
+        'Note: many versions of LXC do not report program execution failure '
+        'meaningfully. Please check the host logs (/var/log) for more '
+        'information.'
+    )
+
+    def _get_name(self):
+        # e.g. u'lxc.web1' -- labels the context in logs.
+        return u'lxc.' + self.options.container
+
+    def get_boot_command(self):
+        # --clear-env avoids leaking the host environment into the container.
+        bits = [
+            self.options.lxc_attach_path,
+            '--clear-env',
+            '--name', self.options.container,
+            '--',
+        ]
+        return bits + super(Connection, self).get_boot_command()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/lxd.py b/deployment/lib/mitogen-0.2.9/mitogen/lxd.py
new file mode 100644
index 0000000000000000000000000000000000000000..675dddcdc738a5e509893217f6d32913d4130225
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/lxd.py
@@ -0,0 +1,76 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import mitogen.core
+import mitogen.parent
+
+
+class Options(mitogen.parent.Options):
+    """
+    Connection options for the LXD client ('lxc') method.
+    """
+    #: Name of the target container.
+    container = None
+    #: Path to the LXD client binary.
+    lxc_path = 'lxc'
+    #: NOTE(review): presumably overrides the interpreter default inherited
+    #: from mitogen.parent.Options -- confirm against the base class.
+    python_path = 'python'
+
+    def __init__(self, container, lxc_path=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+        self.container = container
+        if lxc_path:
+            self.lxc_path = lxc_path
+
+
+class Connection(mitogen.parent.Connection):
+    """
+    Establish a context inside an LXD container via 'lxc exec'.
+    """
+    options_class = Options
+
+    child_is_immediate_subprocess = False
+    create_child_args = {
+        # If lxc finds any of stdin, stdout, stderr connected to a TTY, to
+        # prevent input injection it creates a proxy pty, forcing all IO to be
+        # buffered in <4KiB chunks. So ensure stderr is also routed to the
+        # socketpair.
+        'merge_stdio': True
+    }
+
+    # NOTE(review): hint text refers to "LXC"; appears shared with lxc.py --
+    # confirm the wording is intended for the LXD method as well.
+    eof_error_hint = (
+        'Note: many versions of LXC do not report program execution failure '
+        'meaningfully. Please check the host logs (/var/log) for more '
+        'information.'
+    )
+
+    def _get_name(self):
+        # e.g. u'lxd.web1' -- labels the context in logs.
+        return u'lxd.' + self.options.container
+
+    def get_boot_command(self):
+        # --mode=noninteractive prevents the client allocating a pty.
+        bits = [
+            self.options.lxc_path,
+            'exec',
+            '--mode=noninteractive',
+            self.options.container,
+            '--',
+        ]
+        return bits + super(Connection, self).get_boot_command()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/master.py b/deployment/lib/mitogen-0.2.9/mitogen/master.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9ddf3ddaabc9c4780d31c4e7eb5988df6373440
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/master.py
@@ -0,0 +1,1357 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+This module implements functionality required by master processes, such as
+starting new contexts via SSH. Its size is also restricted, since it must
+be sent to any context that will be used to establish additional child
+contexts.
+"""
+
+import dis
+import errno
+import imp
+import inspect
+import itertools
+import logging
+import os
+import pkgutil
+import re
+import string
+import sys
+import threading
+import types
+import zlib
+
+try:
+    import sysconfig
+except ImportError:
+    sysconfig = None
+
+if not hasattr(pkgutil, 'find_loader'):
+    # find_loader() was new in >=2.5, but the modern pkgutil.py syntax has
+    # been kept intentionally 2.3 compatible so we can reuse it.
+    from mitogen.compat import pkgutil
+
+import mitogen
+import mitogen.core
+import mitogen.minify
+import mitogen.parent
+
+from mitogen.core import b
+from mitogen.core import IOLOG
+from mitogen.core import LOG
+from mitogen.core import str_partition
+from mitogen.core import str_rpartition
+from mitogen.core import to_text
+
+imap = getattr(itertools, 'imap', map)
+izip = getattr(itertools, 'izip', zip)
+
+try:
+    any
+except NameError:
+    from mitogen.core import any
+
+try:
+    next
+except NameError:
+    from mitogen.core import next
+
+
+RLOG = logging.getLogger('mitogen.ctx')
+
+
+def _stdlib_paths():
+    """
+    Return a set of paths from which Python imports the standard library.
+    """
+    attr_candidates = [
+        'prefix',
+        'real_prefix',  # virtualenv: only set inside a virtual environment.
+        'base_prefix',  # venv: always set, equal to prefix if outside.
+    ]
+    prefixes = (getattr(sys, a, None) for a in attr_candidates)
+    version = 'python%s.%s' % sys.version_info[0:2]
+    s = set(os.path.abspath(os.path.join(p, 'lib', version))
+            for p in prefixes if p is not None)
+
+    # When running 'unit2 tests/module_finder_test.py' in a Py2 venv on Ubuntu
+    # 18.10, above is insufficient to catch the real directory.
+    if sysconfig is not None:
+        s.add(sysconfig.get_config_var('DESTLIB'))
+    return s
+
+
+def is_stdlib_name(modname):
+    """
+    Return :data:`True` if `modname` appears to come from the standard library.
+    """
+    # Modules compiled into the interpreter are stdlib by definition.
+    # NOTE(review): the imp module is deprecated on Python 3 -- kept here,
+    # presumably for Python 2 compatibility.
+    if imp.is_builtin(modname) != 0:
+        return True
+
+    # Only modules already present in sys.modules can be classified by path.
+    module = sys.modules.get(modname)
+    if module is None:
+        return False
+
+    # six installs crap with no __file__
+    modpath = os.path.abspath(getattr(module, '__file__', ''))
+    return is_stdlib_path(modpath)
+
+
+_STDLIB_PATHS = _stdlib_paths()
+
+
+def is_stdlib_path(path):
+    return any(
+        os.path.commonprefix((libpath, path)) == libpath
+        and 'site-packages' not in path
+        and 'dist-packages' not in path
+        for libpath in _STDLIB_PATHS
+    )
+
+
+def get_child_modules(path):
+    """
+    Return the suffixes of submodules directly nested beneath the package
+    directory at `path`.
+
+    :param str path:
+        Path to the module's source code on disk, or some PEP-302-recognized
+        equivalent. Usually this is the module's ``__file__`` attribute, but
+        is specified explicitly to avoid loading the module.
+
+    :return:
+        List of submodule name suffixes.
+    """
+    it = pkgutil.iter_modules([os.path.dirname(path)])
+    return [to_text(name) for _, name, _ in it]
+
+
+def _looks_like_script(path):
+    """
+    Return :data:`True` if the (possibly extensionless) file at `path`
+    resembles a Python script. For now we simply verify the file contains
+    ASCII text.
+    """
+    try:
+        fp = open(path, 'rb')
+    except IOError:
+        e = sys.exc_info()[1]
+        if e.args[0] == errno.EISDIR:
+            return False
+        raise
+
+    try:
+        sample = fp.read(512).decode('latin-1')
+        return not set(sample).difference(string.printable)
+    finally:
+        fp.close()
+
+
+def _py_filename(path):
+    if not path:
+        return None
+
+    if path[-4:] in ('.pyc', '.pyo'):
+        path = path.rstrip('co')
+
+    if path.endswith('.py'):
+        return path
+
+    if os.path.exists(path) and _looks_like_script(path):
+        return path
+
+
+def _get_core_source():
+    """
+    Master version of parent.get_core_source(): read mitogen.core's source
+    via inspect and pass it through the minifier before transmission.
+    """
+    source = inspect.getsource(mitogen.core)
+    return mitogen.minify.minimize_source(source)
+
+
+if mitogen.is_master:
+    # TODO: find a less surprising way of installing this.
+    mitogen.parent._get_core_source = _get_core_source
+
+
+LOAD_CONST = dis.opname.index('LOAD_CONST')
+IMPORT_NAME = dis.opname.index('IMPORT_NAME')
+
+
+def _getarg(nextb, c):
+    if c >= dis.HAVE_ARGUMENT:
+        return nextb() | (nextb() << 8)
+
+
+if sys.version_info < (3, 0):
+    def iter_opcodes(co):
+        # Yield `(op, oparg)` tuples from the code object `co`.
+        # Py2: co_code is a str, so each byte must be ord()-ed to an int.
+        ordit = imap(ord, co.co_code)
+        nextb = ordit.next
+        return ((c, _getarg(nextb, c)) for c in ordit)
+elif sys.version_info < (3, 6):
+    def iter_opcodes(co):
+        # Yield `(op, oparg)` tuples from the code object `co`.
+        # Py3.0-3.5: co_code is bytes, iteration yields ints directly, but
+        # arguments still occupy two bytes following the opcode.
+        ordit = iter(co.co_code)
+        nextb = ordit.__next__
+        return ((c, _getarg(nextb, c)) for c in ordit)
+else:
+    def iter_opcodes(co):
+        # Yield `(op, oparg)` tuples from the code object `co`.
+        # Py3.6+ wordcode: every instruction is two bytes (opcode, arg), so
+        # the argument is always the single following byte.
+        ordit = iter(co.co_code)
+        nextb = ordit.__next__
+        # https://github.com/abarnert/cpython/blob/c095a32f/Python/wordcode.md
+        return ((c, nextb()) for c in ordit)
+
+
+def scan_code_imports(co):
+    """
+    Given a code object `co`, scan its bytecode yielding any ``IMPORT_NAME``
+    and associated prior ``LOAD_CONST`` instructions representing an `Import`
+    statement or `ImportFrom` statement.
+
+    :return:
+        Generator producing `(level, modname, namelist)` tuples, where:
+
+        * `level`: -1 for normal import, 0, for absolute import, and >0 for
+          relative import.
+        * `modname`: Name of module to import, or from where `namelist` names
+          are imported.
+        * `namelist`: for `ImportFrom`, the list of names to be imported from
+          `modname`.
+    """
+    opit = iter_opcodes(co)
+    # Three staggered views of the opcode stream so each IMPORT_NAME can be
+    # matched against the two instructions that precede it.
+    opit, opit2, opit3 = itertools.tee(opit, 3)
+
+    try:
+        # opit2 runs one instruction ahead of opit, opit3 two ahead; when
+        # opit3 sees IMPORT_NAME, opit/opit2 hold the preceding LOAD_CONSTs.
+        next(opit2)
+        next(opit3)
+        next(opit3)
+    except StopIteration:
+        # Fewer than three instructions: no import sequence can exist.
+        return
+
+    if sys.version_info >= (2, 5):
+        for oparg1, oparg2, (op3, arg3) in izip(opit, opit2, opit3):
+            if op3 == IMPORT_NAME:
+                op2, arg2 = oparg2
+                op1, arg1 = oparg1
+                # LOAD_CONST level; LOAD_CONST fromlist; IMPORT_NAME modname.
+                if op1 == op2 == LOAD_CONST:
+                    yield (co.co_consts[arg1],
+                           co.co_names[arg3],
+                           co.co_consts[arg2] or ())
+    else:
+        # Python 2.4 did not yet have 'level', so stack format differs.
+        for oparg1, (op2, arg2) in izip(opit, opit2):
+            if op2 == IMPORT_NAME:
+                op1, arg1 = oparg1
+                if op1 == LOAD_CONST:
+                    yield (-1, co.co_names[arg2], co.co_consts[arg1] or ())
+
+
+class ThreadWatcher(object):
+    """
+    Manage threads that wait for another thread to shut down, before invoking
+    `on_join()` for each associated ThreadWatcher.
+
+    In CPython it seems possible to use this method to ensure a non-main thread
+    is signalled when the main thread has exited, using a third thread as a
+    proxy.
+    """
+    #: Protects remaining _cls_* members.
+    _cls_lock = threading.Lock()
+
+    #: PID of the process that last modified the class data. If the PID
+    #: changes, it means the thread watch dict refers to threads that no longer
+    #: exist in the current process (since it forked), and so must be reset.
+    _cls_pid = None
+
+    #: Map watched Thread -> list of ThreadWatcher instances.
+    _cls_instances_by_target = {}
+
+    #: Map watched Thread -> watcher Thread for each watched thread.
+    _cls_thread_by_target = {}
+
+    @classmethod
+    def _reset(cls):
+        """
+        If we have forked since the watch dictionaries were initialized, all
+        that has is garbage, so clear it.
+        """
+        if os.getpid() != cls._cls_pid:
+            cls._cls_pid = os.getpid()
+            cls._cls_instances_by_target.clear()
+            cls._cls_thread_by_target.clear()
+
+    def __init__(self, target, on_join):
+        # target: the Thread being watched; on_join: callback fired after it
+        # terminates.
+        self.target = target
+        self.on_join = on_join
+
+    @classmethod
+    def _watch(cls, target):
+        # Runs on the dedicated watcher thread: block until `target` exits,
+        # then fire every registered callback.
+        # NOTE(review): reads the watcher list without holding _cls_lock;
+        # assumes remove() is not racing this iteration -- confirm.
+        target.join()
+        for watcher in cls._cls_instances_by_target[target]:
+            watcher.on_join()
+
+    def install(self):
+        # Register this watcher, spawning a single watcher thread per watched
+        # target on first registration.
+        self._cls_lock.acquire()
+        try:
+            self._reset()
+            lst = self._cls_instances_by_target.setdefault(self.target, [])
+            lst.append(self)
+            if self.target not in self._cls_thread_by_target:
+                self._cls_thread_by_target[self.target] = threading.Thread(
+                    name='mitogen.master.join_thread_async',
+                    target=self._watch,
+                    args=(self.target,)
+                )
+                self._cls_thread_by_target[self.target].start()
+        finally:
+            self._cls_lock.release()
+
+    def remove(self):
+        # Deregister this watcher; the watcher thread itself is left running.
+        self._cls_lock.acquire()
+        try:
+            self._reset()
+            lst = self._cls_instances_by_target.get(self.target, [])
+            if self in lst:
+                lst.remove(self)
+        finally:
+            self._cls_lock.release()
+
+    @classmethod
+    def watch(cls, target, on_join):
+        # Convenience constructor: build and install in one call.
+        watcher = cls(target, on_join)
+        watcher.install()
+        return watcher
+
+
+class LogForwarder(object):
+    """
+    Install a :data:`mitogen.core.FORWARD_LOG` handler that delivers forwarded
+    log events into the local logging framework. This is used by the master's
+    :class:`Router`.
+
+    The forwarded :class:`logging.LogRecord` objects are delivered to loggers
+    under ``mitogen.ctx.*`` corresponding to their
+    :attr:`mitogen.core.Context.name`, with the message prefixed with the
+    logger name used in the child. The records include some extra attributes:
+
+    * ``mitogen_message``: Unicode original message without the logger name
+      prepended.
+    * ``mitogen_context``: :class:`mitogen.parent.Context` reference to the
+      source context.
+    * ``mitogen_name``: Original logger name.
+
+    :param mitogen.master.Router router:
+        Router to install the handler on.
+    """
+    def __init__(self, router):
+        self._router = router
+        # logger_name -> logging.Logger, avoiding repeated getLogger() calls.
+        self._cache = {}
+        router.add_handler(
+            fn=self._on_forward_log,
+            handle=mitogen.core.FORWARD_LOG,
+        )
+
+    def _on_forward_log(self, msg):
+        """
+        Handle one FORWARD_LOG message by replaying it into the local logging
+        framework. Messages from unknown contexts are dropped with an error.
+        """
+        if msg.is_dead:
+            return
+
+        context = self._router.context_by_id(msg.src_id)
+        if context is None:
+            LOG.error('%s: dropping log from unknown context %d',
+                      self, msg.src_id)
+            return
+
+        # Wire format: '<logger name>\x00<level>\x00<message>'.
+        name, level_s, s = msg.data.decode('utf-8', 'replace').split('\x00', 2)
+
+        logger_name = '%s.[%s]' % (name, context.name)
+        logger = self._cache.get(logger_name)
+        if logger is None:
+            self._cache[logger_name] = logger = logging.getLogger(logger_name)
+
+        # See logging.Handler.makeRecord()
+        record = logging.LogRecord(
+            name=logger.name,
+            level=int(level_s),
+            pathname='(unknown file)',
+            lineno=0,
+            msg=s,
+            args=(),
+            exc_info=None,
+        )
+        record.mitogen_message = s
+        record.mitogen_context = self._router.context_by_id(msg.src_id)
+        record.mitogen_name = name
+        logger.handle(record)
+
+    def __repr__(self):
+        return 'LogForwarder(%r)' % (self._router,)
+
+
+class FinderMethod(object):
+    """
+    Interface to a method for locating a Python module or package given its
+    name according to the running Python interpreter. You'd think this was a
+    simple task, right? Naive young fellow, welcome to the real world.
+    """
+    # Concrete subclasses implement find(); repr shows the subclass name.
+    def __repr__(self):
+        return '%s()' % (type(self).__name__,)
+
+    def find(self, fullname):
+        """
+        Accept a canonical module name as would be found in :data:`sys.modules`
+        and return a `(path, source, is_pkg)` tuple, where:
+
+        * `path`: Unicode string containing path to source file.
+        * `source`: Bytestring containing source file's content.
+        * `is_pkg`: :data:`True` if `fullname` is a package.
+
+        :returns:
+            :data:`None` if not found, or tuple as described above.
+        """
+        raise NotImplementedError()
+
+
+class DefectivePython3xMainMethod(FinderMethod):
+    """
+    Recent versions of Python 3.x introduced an incomplete notion of
+    importer specs, and in doing so created permanent asymmetry in the
+    :mod:`pkgutil` interface handling for the :mod:`__main__` module. Therefore
+    we must handle :mod:`__main__` specially.
+    """
+    def find(self, fullname):
+        """
+        Find :mod:`__main__` using its :data:`__file__` attribute.
+        """
+        # This method handles __main__ only; everything else falls through to
+        # the next FinderMethod.
+        if fullname != '__main__':
+            return None
+
+        mod = sys.modules.get(fullname)
+        if not mod:
+            return None
+
+        # Only serve a file that exists and that _looks_like_script() accepts
+        # (presumably a plausible Python source file -- helper not visible
+        # here, confirm in its definition).
+        path = getattr(mod, '__file__', None)
+        if not (path is not None and os.path.exists(path) and _looks_like_script(path)):
+            return None
+
+        fp = open(path, 'rb')
+        try:
+            source = fp.read()
+        finally:
+            fp.close()
+
+        # __main__ is never a package, hence is_pkg=False.
+        return path, source, False
+
+
+class PkgutilMethod(FinderMethod):
+    """
+    Attempt to fetch source code via pkgutil. In an ideal world, this would
+    be the only required implementation of get_module().
+    """
+    def find(self, fullname):
+        """
+        Find `fullname` using :func:`pkgutil.find_loader`.
+        """
+        try:
+            # Pre-'import spec' this returned None, in Python3.6 it raises
+            # ImportError.
+            loader = pkgutil.find_loader(fullname)
+        except ImportError:
+            e = sys.exc_info()[1]
+            LOG.debug('%r._get_module_via_pkgutil(%r): %s',
+                      self, fullname, e)
+            return None
+
+        IOLOG.debug('%r._get_module_via_pkgutil(%r) -> %r',
+                    self, fullname, loader)
+        if not loader:
+            return
+
+        try:
+            path = _py_filename(loader.get_filename(fullname))
+            source = loader.get_source(fullname)
+            is_pkg = loader.is_package(fullname)
+        except (AttributeError, ImportError):
+            # - Per PEP-302, get_source() and is_package() are optional,
+            #   calling them may throw AttributeError.
+            # - get_filename() may throw ImportError if pkgutil.find_loader()
+            #   picks a "parent" package's loader for some crap that's been
+            #   stuffed in sys.modules, for example in the case of urllib3:
+            #       "loader for urllib3.contrib.pyopenssl cannot handle
+            #        requests.packages.urllib3.contrib.pyopenssl"
+            e = sys.exc_info()[1]
+            LOG.debug('%r: loading %r using %r failed: %s',
+                      self, fullname, loader, e)
+            return
+
+        # A loader exists but could not produce both a source path and source
+        # text; let a later FinderMethod have a go.
+        if path is None or source is None:
+            return
+
+        if isinstance(source, mitogen.core.UnicodeType):
+            # get_source() returns "string" according to PEP-302, which was
+            # reinterpreted for Python 3 to mean a Unicode string.
+            source = source.encode('utf-8')
+
+        return path, source, is_pkg
+
+
+class SysModulesMethod(FinderMethod):
+    """
+    Attempt to fetch source code via :data:`sys.modules`. This was originally
+    specifically to support :mod:`__main__`, but it may catch a few more cases.
+    """
+    def find(self, fullname):
+        """
+        Find `fullname` using its :data:`__file__` attribute.
+        """
+        module = sys.modules.get(fullname)
+        if not isinstance(module, types.ModuleType):
+            LOG.debug('%r: sys.modules[%r] absent or not a regular module',
+                      self, fullname)
+            return
+
+        LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module)
+        # Guard against modules whose __name__ does not match the key they
+        # were found under, e.g. aliases installed into sys.modules.
+        alleged_name = getattr(module, '__name__', None)
+        if alleged_name != fullname:
+            LOG.debug('sys.modules[%r].__name__ is incorrect, assuming '
+                      'this is a hacky module alias and ignoring it. '
+                      'Got %r, module object: %r',
+                      fullname, alleged_name, module)
+            return
+
+        path = _py_filename(getattr(module, '__file__', ''))
+        if not path:
+            return
+
+        LOG.debug('%r: sys.modules[%r]: found %s', self, fullname, path)
+        is_pkg = hasattr(module, '__path__')
+        try:
+            source = inspect.getsource(module)
+        except IOError:
+            # Work around inspect.getsourcelines() bug for 0-byte __init__.py
+            # files.
+            if not is_pkg:
+                raise
+            source = '\n'
+
+        if isinstance(source, mitogen.core.UnicodeType):
+            # get_source() returns "string" according to PEP-302, which was
+            # reinterpreted for Python 3 to mean a Unicode string.
+            source = source.encode('utf-8')
+
+        return path, source, is_pkg
+
+
+class ParentEnumerationMethod(FinderMethod):
+    """
+    Attempt to fetch source code by examining the module's (hopefully less
+    insane) parent package, and if no insane parents exist, simply use
+    :mod:`sys.path` to search for it from scratch on the filesystem using the
+    normal Python lookup mechanism.
+
+    This is required for older versions of :mod:`ansible.compat.six`,
+    :mod:`plumbum.colors`, Ansible 2.8 :mod:`ansible.module_utils.distro` and
+    its submodule :mod:`ansible.module_utils.distro._distro`.
+
+    When some package dynamically replaces itself in :data:`sys.modules`, but
+    only conditionally according to some program logic, it is possible that
+    children may attempt to load modules and subpackages from it that can no
+    longer be resolved by examining a (corrupted) parent.
+
+    For cases like :mod:`ansible.module_utils.distro`, this must handle cases
+    where a package transmuted itself into a totally unrelated module during
+    import and vice versa, where :data:`sys.modules` is replaced with junk that
+    makes it impossible to discover the loaded module using the in-memory
+    module object or any parent package's :data:`__path__`, since they have all
+    been overwritten. Some men just want to watch the world burn.
+    """
+    def _find_sane_parent(self, fullname):
+        """
+        Iteratively search :data:`sys.modules` for the least indirect parent of
+        `fullname` that is loaded and contains a :data:`__path__` attribute.
+
+        :return:
+            `(modname, search_path, modpath)` tuple, where:
+
+                * `modname`: list of name components of the found package, or
+                   the empty list if none is found.
+                * `search_path`: :data:`__path__` attribute of the least
+                   indirect parent found, or :data:`None` if no indirect parent
+                   was found.
+                * `modpath`: list of module name components leading from the
+                   found parent to the target module.
+        """
+        path = None
+        modpath = []
+        while True:
+            pkgname, _, modname = str_rpartition(to_text(fullname), u'.')
+            modpath.insert(0, modname)
+            if not pkgname:
+                # Ran out of parents: caller must search sys.path from scratch.
+                return [], None, modpath
+
+            pkg = sys.modules.get(pkgname)
+            path = getattr(pkg, '__path__', None)
+            if pkg and path:
+                return pkgname.split('.'), path, modpath
+
+            LOG.debug('%r: %r lacks __path__ attribute', self, pkgname)
+            fullname = pkgname
+
+    def _found_package(self, fullname, path):
+        """
+        Convert a discovered package directory into a result tuple by serving
+        its __init__.py.
+        """
+        path = os.path.join(path, '__init__.py')
+        LOG.debug('%r: %r is PKG_DIRECTORY: %r', self, fullname, path)
+        return self._found_module(
+            fullname=fullname,
+            path=path,
+            fp=open(path, 'rb'),
+            is_pkg=True,
+        )
+
+    def _found_module(self, fullname, path, fp, is_pkg=False):
+        """
+        Read source from `fp` and build the `(path, source, is_pkg)` result
+        tuple, closing `fp` in all cases.
+        """
+        try:
+            path = _py_filename(path)
+            if not path:
+                return
+
+            source = fp.read()
+        finally:
+            if fp:
+                fp.close()
+
+        if isinstance(source, mitogen.core.UnicodeType):
+            # get_source() returns "string" according to PEP-302, which was
+            # reinterpreted for Python 3 to mean a Unicode string.
+            source = source.encode('utf-8')
+        return path, source, is_pkg
+
+    def _find_one_component(self, modname, search_path):
+        """
+        Wrap :func:`imp.find_module`, returning its `(fp, path, description)`
+        tuple, or :data:`None` if the component could not be found.
+        """
+        try:
+            #fp, path, (suffix, _, kind) = imp.find_module(modname, search_path)
+            return imp.find_module(modname, search_path)
+        except ImportError:
+            e = sys.exc_info()[1]
+            LOG.debug('%r: imp.find_module(%r, %r) -> %s',
+                      self, modname, [search_path], e)
+            return None
+
+    def find(self, fullname):
+        """
+        See implementation for a description of how this works.
+        """
+        #if fullname not in sys.modules:
+            # Don't attempt this unless a module really exists in sys.modules,
+            # else we could return junk.
+            #return
+
+        fullname = to_text(fullname)
+        modname, search_path, modpath = self._find_sane_parent(fullname)
+        # Walk the remaining name components down from the sane parent,
+        # resolving each one on the filesystem in turn.
+        while True:
+            tup = self._find_one_component(modpath.pop(0), search_path)
+            if tup is None:
+                return None
+
+            fp, path, (suffix, _, kind) = tup
+            if modpath:
+                # Still more components to descend. Result must be a package.
+                if fp:
+                    fp.close()
+                if kind != imp.PKG_DIRECTORY:
+                    LOG.debug('%r: %r appears to be child of non-package %r',
+                              self, fullname, path)
+                    return None
+                search_path = [path]
+            elif kind == imp.PKG_DIRECTORY:
+                return self._found_package(fullname, path)
+            else:
+                return self._found_module(fullname, path, fp)
+
+
+class ModuleFinder(object):
+    """
+    Given the name of a loaded module, make a best-effort attempt at finding
+    related modules likely needed by a child context requesting the original
+    module.
+    """
+    def __init__(self):
+        #: Import machinery is expensive, keep :py:meth:`get_module_source`
+        #: results around.
+        self._found_cache = {}
+
+        #: Avoid repeated dependency scanning, which is expensive.
+        self._related_cache = {}
+
+    def __repr__(self):
+        return 'ModuleFinder()'
+
+    def add_source_override(self, fullname, path, source, is_pkg):
+        """
+        Explicitly install a source cache entry, preventing usual lookup
+        methods from being used.
+
+        Beware the value of `path` is critical when `is_pkg` is specified,
+        since it directs where submodules are searched for.
+
+        :param str fullname:
+            Name of the module to override.
+        :param str path:
+            Module's path as it will appear in the cache.
+        :param bytes source:
+            Module source code as a bytestring.
+        :param bool is_pkg:
+            :data:`True` if the module is a package.
+        """
+        self._found_cache[fullname] = (path, source, is_pkg)
+
+    #: Ordered list of :class:`FinderMethod` instances tried in turn by
+    #: :meth:`get_module_source`.
+    get_module_methods = [
+        DefectivePython3xMainMethod(),
+        PkgutilMethod(),
+        SysModulesMethod(),
+        ParentEnumerationMethod(),
+    ]
+
+    def get_module_source(self, fullname):
+        """
+        Given the name of a loaded module `fullname`, attempt to find its
+        source code.
+
+        :returns:
+            Tuple of `(module path, source text, is package?)`, or :data:`None`
+            if the source cannot be found.
+        """
+        tup = self._found_cache.get(fullname)
+        if tup:
+            return tup
+
+        for method in self.get_module_methods:
+            tup = method.find(fullname)
+            if tup:
+                #LOG.debug('%r returned %r', method, tup)
+                break
+        else:
+            # Negative results are cached too, as (None, None, None).
+            tup = None, None, None
+            LOG.debug('get_module_source(%r): cannot find source', fullname)
+
+        self._found_cache[fullname] = tup
+        return tup
+
+    def resolve_relpath(self, fullname, level):
+        """
+        Given an ImportFrom AST node, guess the prefix that should be tacked on
+        to an alias name to produce a canonical name. `fullname` is the name of
+        the module in which the ImportFrom appears.
+        """
+        mod = sys.modules.get(fullname, None)
+        if hasattr(mod, '__path__'):
+            fullname += '.__init__'
+
+        if level == 0 or not fullname:
+            return ''
+
+        bits = fullname.split('.')
+        if len(bits) <= level:
+            # This would be an ImportError in real code.
+            return ''
+
+        return '.'.join(bits[:-level]) + '.'
+
+    def generate_parent_names(self, fullname):
+        """
+        Yield the name of every parent package of `fullname`, nearest first.
+        """
+        while '.' in fullname:
+            fullname, _, _ = str_rpartition(to_text(fullname), u'.')
+            yield fullname
+
+    def find_related_imports(self, fullname):
+        """
+        Return a list of non-stdlib modules that are directly imported by
+        `fullname`, plus their parents.
+
+        The list is determined by retrieving the source code of
+        `fullname`, compiling it, and examining all IMPORT_NAME ops.
+
+        :param fullname: Fully qualified name of an *already imported* module
+            for which source code can be retrieved
+        :type fullname: str
+        """
+        related = self._related_cache.get(fullname)
+        if related is not None:
+            return related
+
+        modpath, src, _ = self.get_module_source(fullname)
+        if src is None:
+            return []
+
+        maybe_names = list(self.generate_parent_names(fullname))
+
+        co = compile(src, modpath, 'exec')
+        for level, modname, namelist in scan_code_imports(co):
+            if level == -1:
+                # Presumably a Python 2 style import that may resolve either
+                # relative or absolute; consider both candidates.
+                modnames = [modname, '%s.%s' % (fullname, modname)]
+            else:
+                modnames = [
+                    '%s%s' % (self.resolve_relpath(fullname, level), modname)
+                ]
+
+            maybe_names.extend(modnames)
+            maybe_names.extend(
+                '%s.%s' % (mname, name)
+                for mname in modnames
+                for name in namelist
+            )
+
+        # Keep only candidates that are actually loaded, non-stdlib, and not
+        # part of the six.moves machinery.
+        return self._related_cache.setdefault(fullname, sorted(
+            set(
+                mitogen.core.to_text(name)
+                for name in maybe_names
+                if sys.modules.get(name) is not None
+                and not is_stdlib_name(name)
+                and u'six.moves' not in name  # TODO: crap
+            )
+        ))
+
+    def find_related(self, fullname):
+        """
+        Return a list of non-stdlib modules that are imported directly or
+        indirectly by `fullname`, plus their parents.
+
+        This method is like :py:meth:`find_related_imports`, but also
+        recursively searches any modules which are imported by `fullname`.
+
+        :param fullname: Fully qualified name of an *already imported* module
+            for which source code can be retrieved
+        :type fullname: str
+        """
+        # Breadth-first walk of the import graph rooted at `fullname`.
+        stack = [fullname]
+        found = set()
+
+        while stack:
+            name = stack.pop(0)
+            names = self.find_related_imports(name)
+            stack.extend(set(names).difference(set(found).union(stack)))
+            found.update(names)
+
+        found.discard(fullname)
+        return sorted(found)
+
+
+class ModuleResponder(object):
+    """
+    Respond to :data:`mitogen.core.GET_MODULE` requests from children by
+    locating module source via :class:`ModuleFinder` and replying with
+    :data:`mitogen.core.LOAD_MODULE` messages, additionally preloading
+    related modules the child is likely to request next.
+
+    :param mitogen.master.Router router:
+        Router to install the GET_MODULE handler on.
+    """
+    def __init__(self, router):
+        self._log = logging.getLogger('mitogen.responder')
+        self._router = router
+        self._finder = ModuleFinder()
+        self._cache = {}  # fullname -> pickled
+        self.blacklist = []
+        # [''] matches every name, i.e. whitelist everything until narrowed
+        # by whitelist_prefix().
+        self.whitelist = ['']
+
+        #: Context -> set([fullname, ..])
+        self._forwarded_by_context = {}
+
+        #: Number of GET_MODULE messages received.
+        self.get_module_count = 0
+        #: Total time spent in uncached GET_MODULE.
+        self.get_module_secs = 0.0
+        #: Total time spent minifying modules.
+        self.minify_secs = 0.0
+        #: Number of successful LOAD_MODULE messages sent.
+        self.good_load_module_count = 0
+        #: Total bytes in successful LOAD_MODULE payloads.
+        self.good_load_module_size = 0
+        #: Number of negative LOAD_MODULE messages sent.
+        self.bad_load_module_count = 0
+
+        router.add_handler(
+            fn=self._on_get_module,
+            handle=mitogen.core.GET_MODULE,
+        )
+
+    def __repr__(self):
+        return 'ModuleResponder'
+
+    def add_source_override(self, fullname, path, source, is_pkg):
+        """
+        See :meth:`ModuleFinder.add_source_override`.
+        """
+        self._finder.add_source_override(fullname, path, source, is_pkg)
+
+    #: Matches an "if __name__ == '__main__':" execution guard.
+    MAIN_RE = re.compile(b(r'^if\s+__name__\s*==\s*.__main__.\s*:'), re.M)
+    main_guard_msg = (
+        "A child context attempted to import __main__, however the main "
+        "module present in the master process lacks an execution guard. "
+        "Update %r to prevent unintended execution, using a guard like:\n"
+        "\n"
+        "    if __name__ == '__main__':\n"
+        "        # your code here.\n"
+    )
+
+    def whitelist_prefix(self, fullname):
+        """
+        Restrict the module namespace served to children to `fullname` (plus
+        any prefixes whitelisted previously, and 'mitogen' itself).
+        """
+        if self.whitelist == ['']:
+            # Replace the allow-everything default before narrowing.
+            self.whitelist = ['mitogen']
+        self.whitelist.append(fullname)
+
+    def blacklist_prefix(self, fullname):
+        """
+        Refuse to serve modules whose name begins with `fullname`.
+        """
+        self.blacklist.append(fullname)
+
+    def neutralize_main(self, path, src):
+        """
+        Given the source for the __main__ module, try to find where it begins
+        conditional execution based on a "if __name__ == '__main__'" guard, and
+        remove any code after that point.
+        """
+        match = self.MAIN_RE.search(src)
+        if match:
+            return src[:match.start()]
+
+        # Scripts invoking mitogen.main() are considered safe as-is.
+        if b('mitogen.main(') in src:
+            return src
+
+        self._log.error(self.main_guard_msg, path)
+        raise ImportError('refused')
+
+    def _make_negative_response(self, fullname):
+        # Shape matches the positive tuple built in _build_tuple(), with all
+        # payload fields nulled out.
+        return (fullname, None, None, None, ())
+
+    #: Matches the magic marker indicating a module is safe to minify.
+    minify_safe_re = re.compile(b(r'\s+#\s*!mitogen:\s*minify_safe'))
+
+    def _build_tuple(self, fullname):
+        """
+        Build (or fetch from cache) the LOAD_MODULE payload tuple for
+        `fullname`, raising ImportError if it is blacklisted.
+        """
+        if fullname in self._cache:
+            return self._cache[fullname]
+
+        if mitogen.core.is_blacklisted_import(self, fullname):
+            raise ImportError('blacklisted')
+
+        path, source, is_pkg = self._finder.get_module_source(fullname)
+        if path and is_stdlib_path(path):
+            # Prevent loading of 2.x<->3.x stdlib modules! This costs one
+            # RTT per hit, so a client-side solution is also required.
+            self._log.debug('refusing to serve stdlib module %r', fullname)
+            tup = self._make_negative_response(fullname)
+            self._cache[fullname] = tup
+            return tup
+
+        if source is None:
+            # TODO: make this .warning() or similar again once importer has its
+            # own logging category.
+            self._log.debug('could not find source for %r', fullname)
+            tup = self._make_negative_response(fullname)
+            self._cache[fullname] = tup
+            return tup
+
+        if self.minify_safe_re.search(source):
+            # If the module contains a magic marker, it's safe to minify.
+            t0 = mitogen.core.now()
+            source = mitogen.minify.minimize_source(source).encode('utf-8')
+            self.minify_secs += mitogen.core.now() - t0
+
+        if is_pkg:
+            pkg_present = get_child_modules(path)
+            self._log.debug('%s is a package at %s with submodules %r',
+                            fullname, path, pkg_present)
+        else:
+            pkg_present = None
+
+        if fullname == '__main__':
+            source = self.neutralize_main(path, source)
+        compressed = mitogen.core.Blob(zlib.compress(source, 9))
+        # Names of modules the child will likely also need, minus any that
+        # are blacklisted.
+        related = [
+            to_text(name)
+            for name in self._finder.find_related(fullname)
+            if not mitogen.core.is_blacklisted_import(self, name)
+        ]
+        # 0:fullname 1:pkg_present 2:path 3:compressed 4:related
+        tup = (
+            to_text(fullname),
+            pkg_present,
+            to_text(path),
+            compressed,
+            related
+        )
+        self._cache[fullname] = tup
+        return tup
+
+    def _send_load_module(self, stream, fullname):
+        """
+        Send one LOAD_MODULE message for `fullname` down `stream`, unless it
+        was already sent, updating the good/bad counters.
+        """
+        if fullname not in stream.protocol.sent_modules:
+            tup = self._build_tuple(fullname)
+            msg = mitogen.core.Message.pickled(
+                tup,
+                dst_id=stream.protocol.remote_id,
+                handle=mitogen.core.LOAD_MODULE,
+            )
+            self._log.debug('sending %s (%.2f KiB) to %s',
+                            fullname, len(msg.data) / 1024.0, stream.name)
+            self._router._async_route(msg)
+            stream.protocol.sent_modules.add(fullname)
+            # tup[2] is the path: None indicates a negative response.
+            if tup[2] is not None:
+                self.good_load_module_count += 1
+                self.good_load_module_size += len(msg.data)
+            else:
+                self.bad_load_module_count += 1
+
+    def _send_module_load_failed(self, stream, fullname):
+        """
+        Send a negative LOAD_MODULE response for `fullname` down `stream`.
+        """
+        self.bad_load_module_count += 1
+        stream.protocol.send(
+            mitogen.core.Message.pickled(
+                self._make_negative_response(fullname),
+                dst_id=stream.protocol.remote_id,
+                handle=mitogen.core.LOAD_MODULE,
+            )
+        )
+
+    def _send_module_and_related(self, stream, fullname):
+        """
+        Send `fullname` and any eligible related modules down `stream`,
+        falling back to a negative response on any failure.
+        """
+        if fullname in stream.protocol.sent_modules:
+            return
+
+        try:
+            tup = self._build_tuple(fullname)
+            for name in tup[4]:  # related
+                parent, _, _ = str_partition(name, '.')
+                if parent != fullname and parent not in stream.protocol.sent_modules:
+                    # Parent hasn't been sent, so don't load submodule yet.
+                    continue
+
+                self._send_load_module(stream, name)
+            self._send_load_module(stream, fullname)
+        except Exception:
+            LOG.debug('While importing %r', fullname, exc_info=True)
+            self._send_module_load_failed(stream, fullname)
+
+    def _on_get_module(self, msg):
+        """
+        Handle one GET_MODULE request, accounting the time spent servicing it.
+        """
+        if msg.is_dead:
+            return
+
+        stream = self._router.stream_by_id(msg.src_id)
+        if stream is None:
+            return
+
+        fullname = msg.data.decode()
+        self._log.debug('%s requested module %s', stream.name, fullname)
+        self.get_module_count += 1
+        if fullname in stream.protocol.sent_modules:
+            LOG.warning('_on_get_module(): dup request for %r from %r',
+                        fullname, stream)
+
+        t0 = mitogen.core.now()
+        try:
+            self._send_module_and_related(stream, fullname)
+        finally:
+            self.get_module_secs += mitogen.core.now() - t0
+
+    def _send_forward_module(self, stream, context, fullname):
+        """
+        Instruct an intermediary `stream` to forward `fullname` onward to
+        `context`, unless `stream` terminates at `context` directly.
+        """
+        if stream.protocol.remote_id != context.context_id:
+            stream.protocol._send(
+                mitogen.core.Message(
+                    data=b('%s\x00%s' % (context.context_id, fullname)),
+                    handle=mitogen.core.FORWARD_MODULE,
+                    dst_id=stream.protocol.remote_id,
+                )
+            )
+
+    def _forward_one_module(self, context, fullname):
+        """
+        Proactively send `fullname` and each of its parent packages toward
+        `context`, remembering what was already forwarded per context.
+        """
+        forwarded = self._forwarded_by_context.get(context)
+        if forwarded is None:
+            forwarded = set()
+            self._forwarded_by_context[context] = forwarded
+
+        if fullname in forwarded:
+            return
+
+        # Build [fullname, parent, grandparent, ...]; sent in reverse so
+        # ancestors arrive before descendants.
+        path = []
+        while fullname:
+            path.append(fullname)
+            fullname, _, _ = str_rpartition(fullname, u'.')
+
+        stream = self._router.stream_by_id(context.context_id)
+        if stream is None:
+            LOG.debug('%r: dropping forward of %s to no longer existent '
+                      '%r', self, path[0], context)
+            return
+
+        for fullname in reversed(path):
+            self._send_module_and_related(stream, fullname)
+            self._send_forward_module(stream, context, fullname)
+
+    def _forward_modules(self, context, fullnames):
+        # Runs on the broker thread; see forward_modules().
+        IOLOG.debug('%r._forward_modules(%r, %r)', self, context, fullnames)
+        for fullname in fullnames:
+            self._forward_one_module(context, mitogen.core.to_text(fullname))
+
+    def forward_modules(self, context, fullnames):
+        """
+        Arrange for `fullnames` to be forwarded to `context`, deferring the
+        work to the broker thread.
+        """
+        self._router.broker.defer(self._forward_modules, context, fullnames)
+
+
+class Broker(mitogen.core.Broker):
+    """
+    .. note::
+
+        You may construct as many brokers as desired, and use the same broker
+        for multiple routers, however usually only one broker need exist.
+        Multiple brokers may be useful when dealing with sets of children with
+        differing lifetimes. For example, a subscription service where
+        non-payment results in termination for one customer.
+
+    :param bool install_watcher:
+        If :data:`True`, an additional thread is started to monitor the
+        lifetime of the main thread, triggering :meth:`shutdown`
+        automatically in case the user forgets to call it, or their code
+        crashed.
+
+        You should not rely on this functionality in your program, it is only
+        intended as a fail-safe and to simplify the API for new users. In
+        particular, alternative Python implementations may not be able to
+        support watching the main thread.
+    """
+    shutdown_timeout = 5.0
+    #: ThreadWatcher installed by __init__() when `install_watcher=True`.
+    _watcher = None
+    poller_class = mitogen.parent.PREFERRED_POLLER
+
+    def __init__(self, install_watcher=True):
+        if install_watcher:
+            # Trigger shutdown() automatically when the thread constructing
+            # this Broker (usually the main thread) exits.
+            self._watcher = ThreadWatcher.watch(
+                target=threading.currentThread(),
+                on_join=self.shutdown,
+            )
+        super(Broker, self).__init__()
+        self.timers = mitogen.parent.TimerList()
+
+    def shutdown(self):
+        """
+        Shut down the broker, additionally deregistering the thread watcher
+        if one was installed.
+        """
+        super(Broker, self).shutdown()
+        if self._watcher:
+            self._watcher.remove()
+
+
+class Router(mitogen.parent.Router):
+    """
+    Extend :class:`mitogen.core.Router` with functionality useful to masters,
+    and child contexts who later become masters. Currently when this class is
+    required, the target context's router is upgraded at runtime.
+
+    .. note::
+
+        You may construct as many routers as desired, and use the same broker
+        for multiple routers, however usually only one broker and router need
+        exist. Multiple routers may be useful when dealing with separate trust
+        domains, for example, manipulating infrastructure belonging to separate
+        customers or projects.
+
+    :param mitogen.master.Broker broker:
+        Broker to use. If not specified, a private :class:`Broker` is created.
+
+    :param int max_message_size:
+        Override the maximum message size this router is willing to receive or
+        transmit. Any value set here is automatically inherited by any children
+        created by the router.
+
+        This has a liberal default of 128 MiB, but may be set much lower.
+        Beware that setting it below 64KiB may encourage unexpected failures as
+        parents and children can no longer route large Python modules that may
+        be required by your application.
+    """
+
+    broker_class = Broker
+
+    #: When :data:`True`, cause the broker thread and any subsequent broker and
+    #: main threads existing in any child to write
+    #: ``/tmp/mitogen.stats.<pid>.<thread_name>.log`` containing a
+    #: :mod:`cProfile` dump on graceful exit. Must be set prior to construction
+    #: of any :class:`Broker`, e.g. via::
+    #:
+    #:      mitogen.master.Router.profiling = True
+    profiling = os.environ.get('MITOGEN_PROFILING') is not None
+
+    def __init__(self, broker=None, max_message_size=None):
+        if broker is None:
+            broker = self.broker_class()
+        if max_message_size:
+            self.max_message_size = max_message_size
+        super(Router, self).__init__(broker)
+        self.upgrade()
+
+    def upgrade(self):
+        self.id_allocator = IdAllocator(self)
+        self.responder = ModuleResponder(self)
+        self.log_forwarder = LogForwarder(self)
+        self.route_monitor = mitogen.parent.RouteMonitor(router=self)
+        self.add_handler(  # TODO: cutpaste.
+            fn=self._on_detaching,
+            handle=mitogen.core.DETACHING,
+            persist=True,
+        )
+
+    def _on_broker_exit(self):
+        super(Router, self)._on_broker_exit()
+        dct = self.get_stats()
+        dct['self'] = self
+        dct['minify_ms'] = 1000 * dct['minify_secs']
+        dct['get_module_ms'] = 1000 * dct['get_module_secs']
+        dct['good_load_module_size_kb'] = dct['good_load_module_size'] / 1024.0
+        dct['good_load_module_size_avg'] = (
+            (
+                dct['good_load_module_size'] /
+                (float(dct['good_load_module_count']) or 1.0)
+            ) / 1024.0
+        )
+
+        LOG.debug(
+            '%(self)r: stats: '
+                '%(get_module_count)d module requests in '
+                '%(get_module_ms)d ms, '
+                '%(good_load_module_count)d sent '
+                '(%(minify_ms)d ms minify time), '
+                '%(bad_load_module_count)d negative responses. '
+                'Sent %(good_load_module_size_kb).01f kb total, '
+                '%(good_load_module_size_avg).01f kb avg.'
+            % dct
+        )
+
+    def get_stats(self):
+        """
+        Return performance data for the module responder.
+
+        :returns:
+
+            Dict containing keys:
+
+            * `get_module_count`: Integer count of
+              :data:`mitogen.core.GET_MODULE` messages received.
+            * `get_module_secs`: Floating point total seconds spent servicing
+              :data:`mitogen.core.GET_MODULE` requests.
+            * `good_load_module_count`: Integer count of successful
+              :data:`mitogen.core.LOAD_MODULE` messages sent.
+            * `good_load_module_size`: Integer total bytes sent in
+              :data:`mitogen.core.LOAD_MODULE` message payloads.
+            * `bad_load_module_count`: Integer count of negative
+              :data:`mitogen.core.LOAD_MODULE` messages sent.
+            * `minify_secs`: CPU seconds spent minifying modules marked
+               minify-safe.
+        """
+        return {
+            'get_module_count': self.responder.get_module_count,
+            'get_module_secs': self.responder.get_module_secs,
+            'good_load_module_count': self.responder.good_load_module_count,
+            'good_load_module_size': self.responder.good_load_module_size,
+            'bad_load_module_count': self.responder.bad_load_module_count,
+            'minify_secs': self.responder.minify_secs,
+        }
+
+    def enable_debug(self):
+        """
+        Cause this context and any descendant child contexts to write debug
+        logs to ``/tmp/mitogen.<pid>.log``.
+        """
+        mitogen.core.enable_debug_logging()
+        self.debug = True
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, e_type, e_val, tb):
+        self.broker.shutdown()
+        self.broker.join()
+
+    def disconnect_stream(self, stream):
+        self.broker.defer(stream.on_disconnect, self.broker)
+
+    def disconnect_all(self):
+        for stream in self._stream_by_id.values():
+            self.disconnect_stream(stream)
+
+
+class IdAllocator(object):
+    """
+    Allocate IDs for new contexts constructed locally, and blocks of IDs for
+    children to allocate their own IDs using
+    :class:`mitogen.parent.ChildIdAllocator` without risk of conflict, and
+    without necessitating network round-trips for each new context.
+
+    This class responds to :data:`mitogen.core.ALLOCATE_ID` messages received
+    from children by replying with fresh block ID allocations.
+
+    The master's :class:`IdAllocator` instance can be accessed via
+    :attr:`mitogen.master.Router.id_allocator`.
+    """
+    #: Block allocations are made in groups of 1000 by default.
+    BLOCK_SIZE = 1000
+
+    def __init__(self, router):
+        self.router = router
+        self.next_id = 1
+        self.lock = threading.Lock()
+        router.add_handler(
+            fn=self.on_allocate_id,
+            handle=mitogen.core.ALLOCATE_ID,
+        )
+
+    def __repr__(self):
+        return 'IdAllocator(%r)' % (self.router,)
+
+    def allocate(self):
+        """
+        Allocate a context ID by directly incrementing an internal counter.
+
+        :returns:
+            The new context ID.
+        """
+        self.lock.acquire()
+        try:
+            id_ = self.next_id
+            self.next_id += 1
+            return id_
+        finally:
+            self.lock.release()
+
+    def allocate_block(self):
+        """
+        Allocate a block of IDs for use in a child context.
+
+        This function is safe to call from any thread.
+
+        :returns:
+            Tuple of the form `(id, end_id)` where `id` is the first usable ID
+            and `end_id` is the last usable ID.
+        """
+        self.lock.acquire()
+        try:
+            id_ = self.next_id
+            self.next_id += self.BLOCK_SIZE
+            end_id = id_ + self.BLOCK_SIZE
+            LOG.debug('%r: allocating [%d..%d)', self, id_, end_id)
+            return id_, end_id
+        finally:
+            self.lock.release()
+
+    def on_allocate_id(self, msg):
+        if msg.is_dead:
+            return
+
+        id_, last_id = self.allocate_block()
+        requestee = self.router.context_by_id(msg.src_id)
+        LOG.debug('%r: allocating [%r..%r) to %r',
+                  self, id_, last_id, requestee)
+        msg.reply((id_, last_id))
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/minify.py b/deployment/lib/mitogen-0.2.9/mitogen/minify.py
new file mode 100644
index 0000000000000000000000000000000000000000..09fdc4eb20367a8e185b35f805333af12c1eb060
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/minify.py
@@ -0,0 +1,143 @@
+# Copyright 2017, Alex Willmer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import sys
+
+try:
+    from io import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+import mitogen.core
+
+if sys.version_info < (2, 7, 11):
+    from mitogen.compat import tokenize
+else:
+    import tokenize
+
+
+def minimize_source(source):
+    """
+    Remove comments and docstrings from Python `source`, preserving line
+    numbers and syntax of empty blocks.
+
+    :param str source:
+        The source to minimize.
+
+    :returns str:
+        The minimized source.
+    """
+    source = mitogen.core.to_text(source)
+    tokens = tokenize.generate_tokens(StringIO(source).readline)
+    tokens = strip_comments(tokens)
+    tokens = strip_docstrings(tokens)
+    tokens = reindent(tokens)
+    return tokenize.untokenize(tokens)
+
+
+def strip_comments(tokens):
+    """
+    Drop comment tokens from a `tokenize` stream.
+
+    Comments on lines 1-2 are kept, to preserve hashbang and encoding.
+    Trailing whitespace is removed from all lines.
+    """
+    prev_typ = None
+    prev_end_col = 0
+    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
+        if typ in (tokenize.NL, tokenize.NEWLINE):
+            if prev_typ in (tokenize.NL, tokenize.NEWLINE):
+                start_col = 0
+            else:
+                start_col = prev_end_col
+            end_col = start_col + 1
+        elif typ == tokenize.COMMENT and start_row > 2:
+            continue
+        prev_typ = typ
+        prev_end_col = end_col
+        yield typ, tok, (start_row, start_col), (end_row, end_col), line
+
+
+def strip_docstrings(tokens):
+    """
+    Replace docstring tokens with NL tokens in a `tokenize` stream.
+
+    Any STRING token not part of an expression is deemed a docstring.
+    Indented docstrings are not yet recognised.
+    """
+    stack = []
+    state = 'wait_string'
+    for t in tokens:
+        typ = t[0]
+        if state == 'wait_string':
+            if typ in (tokenize.NL, tokenize.COMMENT):
+                yield t
+            elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING):
+                stack.append(t)
+            elif typ == tokenize.NEWLINE:
+                stack.append(t)
+                start_line, end_line = stack[0][2][0], stack[-1][3][0]+1
+                for i in range(start_line, end_line):
+                    yield tokenize.NL, '\n', (i, 0), (i,1), '\n'
+                for t in stack:
+                    if t[0] in (tokenize.DEDENT, tokenize.INDENT):
+                        yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4]
+                del stack[:]
+            else:
+                stack.append(t)
+                for t in stack: yield t
+                del stack[:]
+                state = 'wait_newline'
+        elif state == 'wait_newline':
+            if typ == tokenize.NEWLINE:
+                state = 'wait_string'
+            yield t
+
+
+def reindent(tokens, indent=' '):
+    """
+    Replace existing indentation in a token stream with `indent`.
+    """
+    old_levels = []
+    old_level = 0
+    new_level = 0
+    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
+        if typ == tokenize.INDENT:
+            old_levels.append(old_level)
+            old_level = len(tok)
+            new_level += 1
+            tok = indent * new_level
+        elif typ == tokenize.DEDENT:
+            old_level = old_levels.pop()
+            new_level -= 1
+        start_col = max(0, start_col - old_level + new_level)
+        if start_row == end_row:
+            end_col = start_col + len(tok)
+        yield typ, tok, (start_row, start_col), (end_row, end_col), line
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/os_fork.py b/deployment/lib/mitogen-0.2.9/mitogen/os_fork.py
new file mode 100644
index 0000000000000000000000000000000000000000..da832c65e35970a0398b238ca8a39f5a09050c8c
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/os_fork.py
@@ -0,0 +1,187 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Support for operating in a mixed threading/forking environment.
+"""
+
+import os
+import socket
+import sys
+import threading
+import weakref
+
+import mitogen.core
+
+
+# Weak-key mappings used as weak sets. On Python 2.4, mitogen.core registers
+# its Broker here and mitogen.service registers its Pool too.
+_brokers = weakref.WeakKeyDictionary()
+_pools = weakref.WeakKeyDictionary()
+
+
+def _notice_broker_or_pool(obj):
+    """
+    Used by :mod:`mitogen.core` and :mod:`mitogen.service` to automatically
+    register every broker and pool on Python 2.4/2.5.
+    """
+    if isinstance(obj, mitogen.core.Broker):
+        _brokers[obj] = True
+    else:
+        _pools[obj] = True
+
+
+def wrap_os__fork():
+    corker = Corker(
+        brokers=list(_brokers),
+        pools=list(_pools),
+    )
+    try:
+        corker.cork()
+        return os__fork()
+    finally:
+        corker.uncork()
+
+
+# If Python 2.4/2.5 where threading state is not fixed up, subprocess.Popen()
+# may still deadlock due to the broker thread. In this case, pause os.fork() so
+# that all active threads are paused during fork.
+if sys.version_info < (2, 6):
+    os__fork = os.fork
+    os.fork = wrap_os__fork
+
+
+class Corker(object):
+    """
+    Arrange for :class:`mitogen.core.Broker` and optionally
+    :class:`mitogen.service.Pool` to be temporarily "corked" while fork
+    operations may occur.
+
+    In a mixed threading/forking environment, it is critical no threads are
+    active at the moment of fork, as they could hold mutexes whose state is
+    unrecoverably snapshotted in the locked state in the fork child, causing
+    deadlocks at random future moments.
+
+    To ensure a target thread has all locks dropped, it is made to write a
+    large string to a socket with a small buffer that has :data:`os.O_NONBLOCK`
+    disabled. CPython will drop the GIL and enter the ``write()`` system call,
+    where it will block until the socket buffer is drained, or the write side
+    is closed.
+
+    :class:`mitogen.core.Poller` is used to ensure the thread really has
+    blocked outside any Python locks, by checking if the socket buffer has
+    started to fill.
+
+    Since this necessarily involves posting a message to every existent thread
+    and verifying acknowledgement, it will never be a fast operation.
+
+    This does not yet handle the case of corking being initiated from within a
+    thread that is also a cork target.
+
+    :param brokers:
+        Sequence of :class:`mitogen.core.Broker` instances to cork.
+    :param pools:
+        Sequence of :class:`mitogen.service.Pool` instances to cork.
+    """
+    def __init__(self, brokers=(), pools=()):
+        self.brokers = brokers
+        self.pools = pools
+
+    def _do_cork(self, s, wsock):
+        try:
+            try:
+                while True:
+                    # at least EINTR is possible. Do our best to keep handling
+                    # outside the GIL in this case using sendall().
+                    wsock.sendall(s)
+            except socket.error:
+                pass
+        finally:
+            wsock.close()
+
+    def _cork_one(self, s, obj):
+        """
+        Construct a socketpair, saving one side of it, and passing the other to
+        `obj` to be written to by one of its threads.
+        """
+        rsock, wsock = mitogen.parent.create_socketpair(size=4096)
+        mitogen.core.set_cloexec(rsock.fileno())
+        mitogen.core.set_cloexec(wsock.fileno())
+        mitogen.core.set_block(wsock)  # gevent
+        self._rsocks.append(rsock)
+        obj.defer(self._do_cork, s, wsock)
+
+    def _verify_one(self, rsock):
+        """
+        Pause until the socket `rsock` indicates readability, due to
+        :meth:`_do_cork` triggering a blocking write on another thread.
+        """
+        poller = mitogen.core.Poller()
+        poller.start_receive(rsock.fileno())
+        try:
+            while True:
+                for fd in poller.poll():
+                    return
+        finally:
+            poller.close()
+
+    def cork(self):
+        """
+        Arrange for any associated brokers and pools to be paused with no locks
+        held. This will not return until each thread acknowledges it has ceased
+        execution.
+        """
+        current = threading.currentThread()
+        s = mitogen.core.b('CORK') * ((128 // 4) * 1024)
+        self._rsocks = []
+
+        # Pools must be paused first, as existing work may require the
+        # participation of a broker in order to complete.
+        for pool in self.pools:
+            if not pool.closed:
+                for th in pool._threads:
+                    if th != current:
+                        self._cork_one(s, pool)
+
+        for broker in self.brokers:
+            if broker._alive:
+                if broker._thread != current:
+                    self._cork_one(s, broker)
+
+        # Pause until we can detect every thread has entered write().
+        for rsock in self._rsocks:
+            self._verify_one(rsock)
+
+    def uncork(self):
+        """
+        Arrange for paused threads to resume operation.
+        """
+        for rsock in self._rsocks:
+            rsock.close()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/parent.py b/deployment/lib/mitogen-0.2.9/mitogen/parent.py
new file mode 100644
index 0000000000000000000000000000000000000000..630e3de1928eafb71d699ee4e6a5b9f559185d8d
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/parent.py
@@ -0,0 +1,2770 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+This module defines functionality common to master and parent processes. It is
+sent to any child context that is due to become a parent, due to recursive
+connection.
+"""
+
+import codecs
+import errno
+import fcntl
+import getpass
+import heapq
+import inspect
+import logging
+import os
+import re
+import signal
+import socket
+import struct
+import subprocess
+import sys
+import termios
+import textwrap
+import threading
+import zlib
+
+# Absolute imports for <2.5.
+select = __import__('select')
+
+try:
+    import thread
+except ImportError:
+    import threading as thread
+
+import mitogen.core
+from mitogen.core import b
+from mitogen.core import bytes_partition
+from mitogen.core import IOLOG
+
+
+LOG = logging.getLogger(__name__)
+
+# #410: we must avoid the use of socketpairs if SELinux is enabled.
+try:
+    fp = open('/sys/fs/selinux/enforce', 'rb')
+    try:
+        SELINUX_ENABLED = bool(int(fp.read()))
+    finally:
+        fp.close()
+except IOError:
+    SELINUX_ENABLED = False
+
+
+try:
+    next
+except NameError:
+    # Python 2.4/2.5
+    from mitogen.core import next
+
+
+itervalues = getattr(dict, 'itervalues', dict.values)
+
+if mitogen.core.PY3:
+    xrange = range
+    closure_attr = '__closure__'
+    IM_SELF_ATTR = '__self__'
+else:
+    closure_attr = 'func_closure'
+    IM_SELF_ATTR = 'im_self'
+
+
+try:
+    SC_OPEN_MAX = os.sysconf('SC_OPEN_MAX')
+except ValueError:
+    SC_OPEN_MAX = 1024
+
+BROKER_SHUTDOWN_MSG = (
+    'Connection cancelled because the associated Broker began to shut down.'
+)
+
+OPENPTY_MSG = (
+    "Failed to create a PTY: %s. It is likely the maximum number of PTYs has "
+    "been reached. Consider increasing the 'kern.tty.ptmx_max' sysctl on OS "
+    "X, the 'kernel.pty.max' sysctl on Linux, or modifying your configuration "
+    "to avoid PTY use."
+)
+
+SYS_EXECUTABLE_MSG = (
+    "The Python sys.executable variable is unset, indicating Python was "
+    "unable to determine its original program name. Unless explicitly "
+    "configured otherwise, child contexts will be started using "
+    "'/usr/bin/python'"
+)
+_sys_executable_warning_logged = False
+
+
+def _ioctl_cast(n):
+    """
+    Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin it is
+    signed. Until 2.5 Python exclusively implemented the BSD behaviour,
+    preventing use of large unsigned int requests like the TTY layer uses
+    below. So on 2.4, we cast our unsigned to look like signed for Python.
+    """
+    if sys.version_info < (2, 5):
+        n, = struct.unpack('i', struct.pack('I', n))
+    return n
+
+
+# If not :data:`None`, called prior to exec() of any new child process. Used by
+# :func:`mitogen.utils.reset_affinity` to allow the child to be freely
+# scheduled.
+_preexec_hook = None
+
+# Get PTY number; asm-generic/ioctls.h
+LINUX_TIOCGPTN = _ioctl_cast(2147767344)
+
+# Lock/unlock PTY; asm-generic/ioctls.h
+LINUX_TIOCSPTLCK = _ioctl_cast(1074025521)
+
+IS_LINUX = os.uname()[0] == 'Linux'
+
+SIGNAL_BY_NUM = dict(
+    (getattr(signal, name), name)
+    for name in sorted(vars(signal), reverse=True)
+    if name.startswith('SIG') and not name.startswith('SIG_')
+)
+
+_core_source_lock = threading.Lock()
+_core_source_partial = None
+
+
+def get_log_level():
+    return (LOG.getEffectiveLevel() or logging.INFO)
+
+
+def get_sys_executable():
+    """
+    Return :data:`sys.executable` if it is set, otherwise return
+    ``"/usr/bin/python"`` and log a warning.
+    """
+    if sys.executable:
+        return sys.executable
+
+    global _sys_executable_warning_logged
+    if not _sys_executable_warning_logged:
+        LOG.warn(SYS_EXECUTABLE_MSG)
+        _sys_executable_warning_logged = True
+
+    return '/usr/bin/python'
+
+
+def _get_core_source():
+    """
+    In non-masters, simply fetch the cached mitogen.core source code via the
+    import mechanism. In masters, this function is replaced with a version that
+    performs minification directly.
+    """
+    return inspect.getsource(mitogen.core)
+
+
+def get_core_source_partial():
+    """
+    _get_core_source() is expensive, even with @lru_cache in minify.py, threads
+    can enter it simultaneously causing severe slowdowns.
+    """
+    global _core_source_partial
+
+    if _core_source_partial is None:
+        _core_source_lock.acquire()
+        try:
+            if _core_source_partial is None:
+                _core_source_partial = PartialZlib(
+                    _get_core_source().encode('utf-8')
+                )
+        finally:
+            _core_source_lock.release()
+
+    return _core_source_partial
+
+
+def get_default_remote_name():
+    """
+    Return the default name appearing in argv[0] of remote machines.
+    """
+    s = u'%s@%s:%d'
+    s %= (getpass.getuser(), socket.gethostname(), os.getpid())
+    # In mixed UNIX/Windows environments, the username may contain slashes.
+    return s.translate({
+        ord(u'\\'): ord(u'_'),
+        ord(u'/'): ord(u'_')
+    })
+
+
+def is_immediate_child(msg, stream):
+    """
+    Handler policy that requires messages to arrive only from immediately
+    connected children.
+    """
+    return msg.src_id == stream.protocol.remote_id
+
+
+def flags(names):
+    """
+    Return the result of ORing a set of (space separated) :py:mod:`termios`
+    module constants together.
+    """
+    return sum(getattr(termios, name, 0)
+               for name in names.split())
+
+
+def cfmakeraw(tflags):
+    """
+    Given a list returned by :py:func:`termios.tcgetattr`, return a list
+    modified in a manner similar to the `cfmakeraw()` C library function, but
+    additionally disabling local echo.
+    """
+    # BSD: github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162
+    # Linux: github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20
+    iflag, oflag, cflag, lflag, ispeed, ospeed, cc = tflags
+    iflag &= ~flags('IMAXBEL IXOFF INPCK BRKINT PARMRK '
+                    'ISTRIP INLCR ICRNL IXON IGNPAR')
+    iflag &= ~flags('IGNBRK BRKINT PARMRK')
+    oflag &= ~flags('OPOST')
+    lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG '
+                    'IEXTEN NOFLSH TOSTOP PENDIN')
+    cflag &= ~flags('CSIZE PARENB')
+    cflag |= flags('CS8 CREAD')
+    return [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
+
+
+def disable_echo(fd):
+    old = termios.tcgetattr(fd)
+    new = cfmakeraw(old)
+    flags = getattr(termios, 'TCSASOFT', 0)
+    if not mitogen.core.IS_WSL:
+        # issue #319: Windows Subsystem for Linux as of July 2018 throws EINVAL
+        # if TCSAFLUSH is specified.
+        flags |= termios.TCSAFLUSH
+    termios.tcsetattr(fd, flags, new)
+
+
+def create_socketpair(size=None):
+    """
+    Create a :func:`socket.socketpair` for use as a child's UNIX stdio
+    channels. As socketpairs are bidirectional, they are economical on file
+    descriptor usage as one descriptor can be used for ``stdin`` and
+    ``stdout``. As they are sockets their buffers are tunable, allowing large
+    buffers to improve file transfer throughput and reduce IO loop iterations.
+    """
+    if size is None:
+        size = mitogen.core.CHUNK_SIZE
+
+    parentfp, childfp = socket.socketpair()
+    for fp in parentfp, childfp:
+        fp.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size)
+
+    return parentfp, childfp
+
+
+def create_best_pipe(escalates_privilege=False):
+    """
+    By default we prefer to communicate with children over a UNIX socket, as a
+    single file descriptor can represent bidirectional communication, and a
+    cross-platform API exists to align buffer sizes with the needs of the
+    library.
+
+    SELinux prevents us setting up a privileged process to inherit an AF_UNIX
+    socket, a facility explicitly designed as a better replacement for pipes,
+    because at some point in the mid 90s it might have been commonly possible
+    for AF_INET sockets to end up undesirably connected to a privileged
+    process, so let's make up arbitrary rules breaking all sockets instead.
+
+    If SELinux is detected, fall back to using pipes.
+
+    :param bool escalates_privilege:
+        If :data:`True`, the target program may escalate privileges, causing
+        SELinux to disconnect AF_UNIX sockets, so avoid those.
+    :returns:
+        `(parent_rfp, child_wfp, child_rfp, parent_wfp)`
+    """
+    if (not escalates_privilege) or (not SELINUX_ENABLED):
+        parentfp, childfp = create_socketpair()
+        return parentfp, childfp, childfp, parentfp
+
+    parent_rfp, child_wfp = mitogen.core.pipe()
+    try:
+        child_rfp, parent_wfp = mitogen.core.pipe()
+        return parent_rfp, child_wfp, child_rfp, parent_wfp
+    except:
+        parent_rfp.close()
+        child_wfp.close()
+        raise
+
+
+def popen(**kwargs):
+    """
+    Wrap :class:`subprocess.Popen` to ensure any global :data:`_preexec_hook`
+    is invoked in the child.
+    """
+    real_preexec_fn = kwargs.pop('preexec_fn', None)
+    def preexec_fn():
+        if _preexec_hook:
+            _preexec_hook()
+        if real_preexec_fn:
+            real_preexec_fn()
+    return subprocess.Popen(preexec_fn=preexec_fn, **kwargs)
+
+
+def create_child(args, merge_stdio=False, stderr_pipe=False,
+                 escalates_privilege=False, preexec_fn=None):
+    """
+    Create a child process whose stdin/stdout is connected to a socket.
+
+    :param list args:
+        Program argument vector.
+    :param bool merge_stdio:
+        If :data:`True`, arrange for `stderr` to be connected to the `stdout`
+        socketpair, rather than inherited from the parent process. This may be
+        necessary to ensure that no TTY is connected to any stdio handle, for
+        instance when using LXC.
+    :param bool stderr_pipe:
+        If :data:`True` and `merge_stdio` is :data:`False`, arrange for
+        `stderr` to be connected to a separate pipe, to allow any ongoing debug
+        logs generated by e.g. SSH to be output as the session progresses,
+        without interfering with `stdout`.
+    :param bool escalates_privilege:
+        If :data:`True`, the target program may escalate privileges, causing
+        SELinux to disconnect AF_UNIX sockets, so avoid those.
+    :param function preexec_fn:
+        If not :data:`None`, a function to run within the post-fork child
+        before executing the target program.
+    :returns:
+        :class:`Process` instance.
+    """
+    parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe(
+        escalates_privilege=escalates_privilege
+    )
+
+    stderr = None
+    stderr_r = None
+    if merge_stdio:
+        stderr = child_wfp
+    elif stderr_pipe:
+        stderr_r, stderr = mitogen.core.pipe()
+        mitogen.core.set_cloexec(stderr_r.fileno())
+
+    try:
+        proc = popen(
+            args=args,
+            stdin=child_rfp,
+            stdout=child_wfp,
+            stderr=stderr,
+            close_fds=True,
+            preexec_fn=preexec_fn,
+        )
+    except:
+        child_rfp.close()
+        child_wfp.close()
+        parent_rfp.close()
+        parent_wfp.close()
+        if stderr_pipe:
+            stderr.close()
+            stderr_r.close()
+        raise
+
+    child_rfp.close()
+    child_wfp.close()
+    if stderr_pipe:
+        stderr.close()
+
+    return PopenProcess(
+        proc=proc,
+        stdin=parent_wfp,
+        stdout=parent_rfp,
+        stderr=stderr_r,
+    )
+
+
+def _acquire_controlling_tty():
+    """
+    Make the TTY attached to stderr (fd 2) the controlling TTY of this
+    process. Runs in the post-fork child (as a `preexec_fn`), where fd 2 has
+    been connected to a PTY slave by the caller.
+    """
+    # Detach from the parent's session so this process is eligible to acquire
+    # a new controlling TTY of its own.
+    os.setsid()
+    if sys.platform in ('linux', 'linux2'):
+        # On Linux, the controlling tty becomes the first tty opened by a
+        # process lacking any prior tty.
+        os.close(os.open(os.ttyname(2), os.O_RDWR))
+    if hasattr(termios, 'TIOCSCTTY') and not mitogen.core.IS_WSL:
+        # #550: prehistoric WSL does not like TIOCSCTTY.
+        # On BSD an explicit ioctl is required. For some inexplicable reason,
+        # Python 2.6 on Travis also requires it.
+        fcntl.ioctl(2, termios.TIOCSCTTY)
+
+
+def _linux_broken_devpts_openpty():
+    """
+    #462: On broken Linux hosts with mismatched configuration (e.g. old
+    /etc/fstab template installed), /dev/pts may be mounted without the gid=
+    mount option, causing new slave devices to be created with the group ID of
+    the calling process. This upsets glibc, whose openpty() is required by
+    specification to produce a slave owned by a special group ID (which is
+    always the 'tty' group).
+
+    Glibc attempts to use "pt_chown" to fix ownership. If that fails, it
+    chown()s the PTY directly, which fails due to non-root, causing openpty()
+    to fail with EPERM ("Operation not permitted"). Since we don't need the
+    magical TTY group to run sudo and su, open the PTY ourselves in this case.
+
+    :raises mitogen.core.StreamError:
+        The PTY pair could not be allocated manually.
+    :returns:
+        `(master_fd, slave_fd)` raw file descriptor tuple.
+    """
+    master_fd = None
+    try:
+        # Opening /dev/ptmx causes a PTY pair to be allocated, and the
+        # corresponding slave /dev/pts/* device to be created, owned by UID/GID
+        # matching this process.
+        master_fd = os.open('/dev/ptmx', os.O_RDWR)
+        # Clear the lock bit from the PTY. This a prehistoric feature from a
+        # time when slave device files were persistent.
+        fcntl.ioctl(master_fd, LINUX_TIOCSPTLCK, struct.pack('i', 0))
+        # Since v4.13 TIOCGPTPEER exists to open the slave in one step, but we
+        # must support older kernels. Ask for the PTY number.
+        pty_num_s = fcntl.ioctl(master_fd, LINUX_TIOCGPTN,
+                                struct.pack('i', 0))
+        pty_num, = struct.unpack('i', pty_num_s)
+        pty_name = '/dev/pts/%d' % (pty_num,)
+        # Now open it with O_NOCTTY to ensure it doesn't change our controlling
+        # TTY. Otherwise when we close the FD we get killed by the kernel, and
+        # the child we spawn that should really attach to it will get EPERM
+        # during _acquire_controlling_tty().
+        slave_fd = os.open(pty_name, os.O_RDWR|os.O_NOCTTY)
+        return master_fd, slave_fd
+    except OSError:
+        # Don't leak the master if the slave could not be opened. The active
+        # exception remains available via sys.exc_info() inside this handler.
+        if master_fd is not None:
+            os.close(master_fd)
+        e = sys.exc_info()[1]
+        raise mitogen.core.StreamError(OPENPTY_MSG, e)
+
+
+def openpty():
+    """
+    Call :func:`os.openpty`, raising a descriptive error if the call fails.
+
+    :raises mitogen.core.StreamError:
+        Creating a PTY failed.
+    :returns:
+        `(master_fp, slave_fp)` file-like objects.
+    """
+    try:
+        master_fd, slave_fd = os.openpty()
+    except OSError:
+        e = sys.exc_info()[1]
+        # Fall back to manual PTY allocation only for the specific Linux
+        # EPERM failure mode handled by _linux_broken_devpts_openpty().
+        if not (IS_LINUX and e.args[0] == errno.EPERM):
+            raise mitogen.core.StreamError(OPENPTY_MSG, e)
+        master_fd, slave_fd = _linux_broken_devpts_openpty()
+
+    # Wrap both ends in unbuffered binary file objects. Echo is disabled on
+    # both sides so written data is not reflected back into the stream, and
+    # the slave is left in blocking mode for the child's benefit.
+    master_fp = os.fdopen(master_fd, 'r+b', 0)
+    slave_fp = os.fdopen(slave_fd, 'r+b', 0)
+    disable_echo(master_fd)
+    disable_echo(slave_fd)
+    mitogen.core.set_block(slave_fd)
+    return master_fp, slave_fp
+
+
+def tty_create_child(args):
+    """
+    Return a file descriptor connected to the master end of a pseudo-terminal,
+    whose slave end is connected to stdin/stdout/stderr of a new child process.
+    The child is created such that the pseudo-terminal becomes its controlling
+    TTY, ensuring access to /dev/tty returns a new file descriptor open on the
+    slave end.
+
+    :param list args:
+        Program argument vector.
+    :raises mitogen.core.StreamError:
+        A PTY could not be allocated.
+    :returns:
+        :class:`Process` instance.
+    """
+    master_fp, slave_fp = openpty()
+    try:
+        proc = popen(
+            args=args,
+            stdin=slave_fp,
+            stdout=slave_fp,
+            stderr=slave_fp,
+            preexec_fn=_acquire_controlling_tty,
+            close_fds=True,
+        )
+    except:
+        # Don't leak the PTY pair if the child failed to start.
+        master_fp.close()
+        slave_fp.close()
+        raise
+
+    # The child now holds its own copy of the slave end; the parent only
+    # needs the master for bidirectional communication.
+    slave_fp.close()
+    return PopenProcess(
+        proc=proc,
+        stdin=master_fp,
+        stdout=master_fp,
+    )
+
+
+def hybrid_tty_create_child(args, escalates_privilege=False):
+    """
+    Like :func:`tty_create_child`, except attach stdin/stdout to a socketpair
+    like :func:`create_child`, but leave stderr and the controlling TTY
+    attached to a TTY.
+
+    This permits high throughput communication with programs that are reached
+    via some program that requires a TTY for password input, like many
+    configurations of sudo. The UNIX TTY layer tends to have tiny (no more than
+    14KiB) buffers, forcing many IO loop iterations when transferring bulk
+    data, causing significant performance loss.
+
+    :param bool escalates_privilege:
+        If :data:`True`, the target program may escalate privileges, causing
+        SELinux to disconnect AF_UNIX sockets, so avoid those.
+    :param list args:
+        Program argument vector.
+    :raises mitogen.core.StreamError:
+        A PTY could not be allocated.
+    :returns:
+        :class:`Process` instance.
+    """
+    master_fp, slave_fp = openpty()
+    try:
+        parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe(
+            escalates_privilege=escalates_privilege,
+        )
+        try:
+            # NOTE(review): the child-side pipe ends appear to need explicit
+            # blocking mode here; confirm against create_best_pipe().
+            mitogen.core.set_block(child_rfp)
+            mitogen.core.set_block(child_wfp)
+            proc = popen(
+                args=args,
+                stdin=child_rfp,
+                stdout=child_wfp,
+                stderr=slave_fp,
+                preexec_fn=_acquire_controlling_tty,
+                close_fds=True,
+            )
+        except:
+            # Close all four pipe ends; the outer handler closes the PTY.
+            parent_rfp.close()
+            child_wfp.close()
+            parent_wfp.close()
+            child_rfp.close()
+            raise
+    except:
+        master_fp.close()
+        slave_fp.close()
+        raise
+
+    # The child owns its copies of the slave TTY and pipe ends; release ours.
+    slave_fp.close()
+    child_rfp.close()
+    child_wfp.close()
+    return PopenProcess(
+        proc=proc,
+        stdin=parent_wfp,
+        stdout=parent_rfp,
+        stderr=master_fp,
+    )
+
+
+class Timer(object):
+    """
+    Represents a future event.
+    """
+    #: Set to :data:`False` if :meth:`cancel` has been called, or immediately
+    #: prior to being executed by :meth:`TimerList.expire`.
+    active = True
+
+    def __init__(self, when, func):
+        self.when = when
+        self.func = func
+
+    def __repr__(self):
+        return 'Timer(%r, %r)' % (self.when, self.func)
+
+    def __eq__(self, other):
+        return self.when == other.when
+
+    def __lt__(self, other):
+        return self.when < other.when
+
+    def __le__(self, other):
+        return self.when <= other.when
+
+    def cancel(self):
+        """
+        Cancel this event. If it has not yet executed, it will not execute
+        during any subsequent :meth:`TimerList.expire` call.
+        """
+        self.active = False
+
+
+class TimerList(object):
+    """
+    Efficiently manage a list of cancellable future events relative to wall
+    clock time. An instance of this class is installed as
+    :attr:`mitogen.master.Broker.timers` by default, and as
+    :attr:`mitogen.core.Broker.timers` in children after a call to
+    :func:`mitogen.parent.upgrade_router`.
+
+    You can use :class:`TimerList` to cause the broker to wake at arbitrary
+    future moments, useful for implementing timeouts and polling in an
+    asynchronous context.
+
+    :class:`TimerList` methods can only be called from asynchronous context,
+    for example via :meth:`mitogen.core.Broker.defer`.
+
+    The broker automatically adjusts its sleep delay according to the installed
+    timer list, and arranges for timers to expire via automatic calls to
+    :meth:`expire`. The main user interface to :class:`TimerList` is
+    :meth:`schedule`.
+    """
+    _now = mitogen.core.now
+
+    def __init__(self):
+        self._lst = []
+
+    def get_timeout(self):
+        """
+        Return the floating point seconds until the next event is due.
+        
+        :returns:
+            Floating point delay, or 0.0, or :data:`None` if no events are
+            scheduled.
+        """
+        while self._lst and not self._lst[0].active:
+            heapq.heappop(self._lst)
+        if self._lst:
+            return max(0, self._lst[0].when - self._now())
+
+    def schedule(self, when, func):
+        """
+        Schedule a future event.
+
+        :param float when:
+            UNIX time in seconds when event should occur.
+        :param callable func:
+            Callable to invoke on expiry.
+        :returns:
+            A :class:`Timer` instance, exposing :meth:`Timer.cancel`, which may
+            be used to cancel the future invocation.
+        """
+        timer = Timer(when, func)
+        heapq.heappush(self._lst, timer)
+        return timer
+
+    def expire(self):
+        """
+        Invoke callbacks for any events in the past.
+        """
+        now = self._now()
+        while self._lst and self._lst[0].when <= now:
+            timer = heapq.heappop(self._lst)
+            if timer.active:
+                timer.active = False
+                timer.func()
+
+
+class PartialZlib(object):
+    """
+    Because the mitogen.core source has a line appended to it during bootstrap,
+    it must be recompressed for each connection. This is not a problem for a
+    small number of connections, but it amounts to 30 seconds CPU time by the
+    time 500 targets are in use.
+
+    For that reason, build a compressor containing mitogen.core and flush as
+    much of it as possible into an initial buffer. Then to append the custom
+    line, clone the compressor and compress just that line.
+
+    A full compression costs ~6ms on a modern machine, this method costs ~35
+    usec.
+    """
+    def __init__(self, s):
+        self.s = s
+        if sys.version_info > (2, 5):
+            self._compressor = zlib.compressobj(9)
+            self._out = self._compressor.compress(s)
+            self._out += self._compressor.flush(zlib.Z_SYNC_FLUSH)
+        else:
+            self._compressor = None
+
+    def append(self, s):
+        """
+        Append the bytestring `s` to the compressor state and return the
+        final compressed output.
+        """
+        if self._compressor is None:
+            return zlib.compress(self.s + s, 9)
+        else:
+            compressor = self._compressor.copy()
+            out = self._out
+            out += compressor.compress(s)
+            return out + compressor.flush()
+
+
+def _upgrade_broker(broker):
+    """
+    Extract the poller state from Broker and replace it with the industrial
+    strength poller for this OS. Must run on the Broker thread.
+
+    :param mitogen.core.Broker broker:
+        The broker whose poller should be swapped out.
+    """
+    # This function is deadly! The act of calling start_receive() generates log
+    # messages which must be silenced as the upgrade progresses, otherwise the
+    # poller state will change as it is copied, resulting in write fds that are
+    # lost. (Due to LogHandler->Router->Stream->Protocol->Broker->Poller, where
+    # Stream only calls start_transmit() when transitioning from empty to
+    # non-empty buffer. If the start_transmit() is lost, writes from the child
+    # hang permanently).
+    root = logging.getLogger()
+    old_level = root.level
+    root.setLevel(logging.CRITICAL)
+    try:
+        old = broker.poller
+        new = PREFERRED_POLLER()
+        # Copy every registered reader/writer into the replacement poller.
+        for fd, data in old.readers:
+            new.start_receive(fd, data)
+        for fd, data in old.writers:
+            new.start_transmit(fd, data)
+
+        old.close()
+        broker.poller = new
+    finally:
+        root.setLevel(old_level)
+
+    # Install a timer list so the upgraded broker can service TimerList users.
+    broker.timers = TimerList()
+    LOG.debug('upgraded %r with %r (new: %d readers, %d writers; '
+              'old: %d readers, %d writers)', old, new,
+              len(new.readers), len(new.writers),
+              len(old.readers), len(old.writers))
+
+
+@mitogen.core.takes_econtext
+def upgrade_router(econtext):
+    """
+    Upgrade the current context in place so it can act as a parent: defer a
+    poller/timer upgrade to the broker thread, promote the router to this
+    module's :class:`Router`, and initialize it via ``Router.upgrade()``.
+    No-op if the router was already upgraded.
+    """
+    if not isinstance(econtext.router, Router):  # TODO
+        econtext.broker.defer(_upgrade_broker, econtext.broker)
+        econtext.router.__class__ = Router  # TODO
+        econtext.router.upgrade(
+            importer=econtext.importer,
+            parent=econtext.parent,
+        )
+
+
+def get_connection_class(name):
+    """
+    Given the name of a Mitogen connection method, import its implementation
+    module and return its Stream subclass.
+    """
+    if name == u'local':
+        name = u'parent'
+    module = mitogen.core.import_module(u'mitogen.' + name)
+    return module.Connection
+
+
+@mitogen.core.takes_econtext
+def _proxy_connect(name, method_name, kwargs, econtext):
+    """
+    Implements the target portion of Router._proxy_connect() by upgrading the
+    local process to a parent if it was not already, then calling back into
+    Router._connect() using the arguments passed to the parent's
+    Router.connect().
+
+    :param str name:
+        Name to assign the new context.
+    :param str method_name:
+        Connection method name, resolved via :func:`get_connection_class`.
+    :param dict kwargs:
+        Connection keyword arguments forwarded to Router._connect().
+    :returns:
+        Dict containing:
+        * ``id``: :data:`None`, or integer new context ID.
+        * ``name``: :data:`None`, or string name attribute of new Context.
+        * ``msg``: :data:`None`, or StreamError exception text.
+    """
+    upgrade_router(econtext)
+
+    try:
+        context = econtext.router._connect(
+            klass=get_connection_class(method_name),
+            name=name,
+            **kwargs
+        )
+    except mitogen.core.StreamError:
+        # Report failure as data rather than raising, so the parent can
+        # distinguish connection errors from RPC errors.
+        return {
+            u'id': None,
+            u'name': None,
+            u'msg': 'error occurred on host %s: %s' % (
+                socket.gethostname(),
+                sys.exc_info()[1],
+            ),
+        }
+
+    return {
+        u'id': context.context_id,
+        u'name': context.name,
+        u'msg': None,
+    }
+
+
+def returncode_to_str(n):
+    """
+    Parse and format a :func:`os.waitpid` exit status.
+    """
+    if n < 0:
+        return 'exited due to signal %d (%s)' % (-n, SIGNAL_BY_NUM.get(-n))
+    return 'exited with return code %d' % (n,)
+
+
+class EofError(mitogen.core.StreamError):
+    """
+    Raised by :class:`Connection` when an empty read is detected from the
+    remote process before bootstrap completes.
+    """
+    # inherits from StreamError to maintain compatibility: existing callers
+    # catching StreamError also see EOF failures.
+    pass
+
+
+class CancelledError(mitogen.core.StreamError):
+    """
+    Raised by :class:`Connection` when :meth:`mitogen.core.Broker.shutdown` is
+    called before bootstrap completes.
+    """
+    # Subclasses StreamError so cancellation surfaces through the same
+    # except-clauses as any other connection failure.
+    pass
+
+
+class Argv(object):
+    """
+    Wrapper to defer argv formatting when debug logging is disabled.
+    """
+    def __init__(self, argv):
+        self.argv = argv
+
+    must_escape = frozenset('\\$"`!')
+    must_escape_or_space = must_escape | frozenset(' ')
+
+    def escape(self, x):
+        if not self.must_escape_or_space.intersection(x):
+            return x
+
+        s = '"'
+        for c in x:
+            if c in self.must_escape:
+                s += '\\'
+            s += c
+        s += '"'
+        return s
+
+    def __str__(self):
+        return ' '.join(map(self.escape, self.argv))
+
+
+class CallSpec(object):
+    """
+    Wrapper to defer call argument formatting when debug logging is disabled.
+    """
+    def __init__(self, func, args, kwargs):
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+
+    def _get_name(self):
+        bits = [self.func.__module__]
+        if inspect.ismethod(self.func):
+            im_self = getattr(self.func, IM_SELF_ATTR)
+            bits.append(getattr(im_self, '__name__', None) or
+                        getattr(type(im_self), '__name__', None))
+        bits.append(self.func.__name__)
+        return u'.'.join(bits)
+
+    def _get_args(self):
+        return u', '.join(repr(a) for a in self.args)
+
+    def _get_kwargs(self):
+        s = u''
+        if self.kwargs:
+            s = u', '.join('%s=%r' % (k, v) for k, v in self.kwargs.items())
+            if self.args:
+                s = u', ' + s
+        return s
+
+    def __repr__(self):
+        return '%s(%s%s)' % (
+            self._get_name(),
+            self._get_args(),
+            self._get_kwargs(),
+        )
+
+
+class PollPoller(mitogen.core.Poller):
+    """
+    Poller based on the POSIX :linux:man2:`poll` interface. Not available on
+    some versions of OS X, otherwise it is the preferred poller for small FD
+    counts, as there is no setup/teardown/configuration system call overhead.
+    """
+    SUPPORTED = hasattr(select, 'poll')
+    _repr = 'PollPoller()'
+
+    def __init__(self):
+        super(PollPoller, self).__init__()
+        self._pollobj = select.poll()
+
+    # TODO: no proof we dont need writemask too
+    # getattr() defaults of 0 guard against platforms missing the constants.
+    _readmask = (
+        getattr(select, 'POLLIN', 0) |
+        getattr(select, 'POLLHUP', 0)
+    )
+
+    def _update(self, fd):
+        # Recompute the desired event mask for `fd` from the base class's
+        # reader/writer sets and (re-)register or unregister accordingly.
+        mask = (((fd in self._rfds) and self._readmask) |
+                ((fd in self._wfds) and select.POLLOUT))
+        if mask:
+            self._pollobj.register(fd, mask)
+        else:
+            try:
+                self._pollobj.unregister(fd)
+            except KeyError:
+                # Never registered, or already unregistered.
+                pass
+
+    def _poll(self, timeout):
+        # poll() expects milliseconds; None (block forever) passes unscaled.
+        if timeout:
+            timeout *= 1000
+
+        events, _ = mitogen.core.io_op(self._pollobj.poll, timeout)
+        for fd, event in events:
+            if event & self._readmask:
+                IOLOG.debug('%r: POLLIN|POLLHUP for %r', self, fd)
+                data, gen = self._rfds.get(fd, (None, None))
+                # NOTE(review): `gen < self._generation` appears to suppress
+                # events for FDs (re-)registered during the current poll
+                # cycle — confirm against mitogen.core.Poller.
+                if gen and gen < self._generation:
+                    yield data
+            if event & select.POLLOUT:
+                IOLOG.debug('%r: POLLOUT for %r', self, fd)
+                data, gen = self._wfds.get(fd, (None, None))
+                if gen and gen < self._generation:
+                    yield data
+
+
+class KqueuePoller(mitogen.core.Poller):
+    """
+    Poller based on the FreeBSD/Darwin :freebsd:man2:`kqueue` interface.
+    """
+    SUPPORTED = hasattr(select, 'kqueue')
+    _repr = 'KqueuePoller()'
+
+    def __init__(self):
+        super(KqueuePoller, self).__init__()
+        self._kqueue = select.kqueue()
+        self._changelist = []
+
+    def close(self):
+        # Release the kernel kqueue object in addition to base class state.
+        super(KqueuePoller, self).close()
+        self._kqueue.close()
+
+    def _control(self, fd, filters, flags):
+        # Apply a single kevent change (add or delete a filter for `fd`).
+        mitogen.core._vv and IOLOG.debug(
+            '%r._control(%r, %r, %r)', self, fd, filters, flags)
+        # TODO: at shutdown it is currently possible for KQ_EV_ADD/KQ_EV_DEL
+        # pairs to be pending after the associated file descriptor has already
+        # been closed. Fixing this requires maintaining extra state, or perhaps
+        # making fd closure the poller's responsibility. In the meantime,
+        # simply apply changes immediately.
+        # self._changelist.append(select.kevent(fd, filters, flags))
+        changelist = [select.kevent(fd, filters, flags)]
+        events, _ = mitogen.core.io_op(self._kqueue.control, changelist, 0, 0)
+        assert not events
+
+    def start_receive(self, fd, data=None):
+        mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)',
+            self, fd, data)
+        if fd not in self._rfds:
+            self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD)
+        # `data or fd`: the fd itself is yielded when no payload was supplied.
+        self._rfds[fd] = (data or fd, self._generation)
+
+    def stop_receive(self, fd):
+        mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd)
+        if fd in self._rfds:
+            self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)
+            del self._rfds[fd]
+
+    def start_transmit(self, fd, data=None):
+        mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)',
+            self, fd, data)
+        if fd not in self._wfds:
+            self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
+        self._wfds[fd] = (data or fd, self._generation)
+
+    def stop_transmit(self, fd):
+        mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd)
+        if fd in self._wfds:
+            self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
+            del self._wfds[fd]
+
+    def _poll(self, timeout):
+        # _changelist stays empty while the TODO in _control() stands, but
+        # drain it anyway so batched changes can be reinstated later. 32 is
+        # the maximum number of events fetched per kqueue.control() call.
+        changelist = self._changelist
+        self._changelist = []
+        events, _ = mitogen.core.io_op(self._kqueue.control,
+            changelist, 32, timeout)
+        for event in events:
+            fd = event.ident
+            if event.flags & select.KQ_EV_ERROR:
+                LOG.debug('ignoring stale event for fd %r: errno=%d: %s',
+                          fd, event.data, errno.errorcode.get(event.data))
+            elif event.filter == select.KQ_FILTER_READ:
+                data, gen = self._rfds.get(fd, (None, None))
+                # Events can still be read for an already-discarded fd.
+                if gen and gen < self._generation:
+                    mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
+                    yield data
+            elif event.filter == select.KQ_FILTER_WRITE and fd in self._wfds:
+                data, gen = self._wfds.get(fd, (None, None))
+                if gen and gen < self._generation:
+                    mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
+                    yield data
+
+
+class EpollPoller(mitogen.core.Poller):
+    """
+    Poller based on the Linux :linux:man2:`epoll` interface.
+    """
+    SUPPORTED = hasattr(select, 'epoll')
+    _repr = 'EpollPoller()'
+
+    def __init__(self):
+        super(EpollPoller, self).__init__()
+        self._epoll = select.epoll(32)
+        # FDs currently registered with the kernel epoll object; needed to
+        # decide between register() and modify().
+        self._registered_fds = set()
+
+    def close(self):
+        # Release the kernel epoll object in addition to base class state.
+        super(EpollPoller, self).close()
+        self._epoll.close()
+
+    def _control(self, fd):
+        # Recompute the desired event mask for `fd` and register, modify, or
+        # unregister it with the kernel as appropriate.
+        mitogen.core._vv and IOLOG.debug('%r._control(%r)', self, fd)
+        mask = (((fd in self._rfds) and select.EPOLLIN) |
+                ((fd in self._wfds) and select.EPOLLOUT))
+        if mask:
+            if fd in self._registered_fds:
+                self._epoll.modify(fd, mask)
+            else:
+                self._epoll.register(fd, mask)
+                self._registered_fds.add(fd)
+        elif fd in self._registered_fds:
+            self._epoll.unregister(fd)
+            self._registered_fds.remove(fd)
+
+    def start_receive(self, fd, data=None):
+        mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)',
+            self, fd, data)
+        # `data or fd`: the fd itself is yielded when no payload was supplied.
+        self._rfds[fd] = (data or fd, self._generation)
+        self._control(fd)
+
+    def stop_receive(self, fd):
+        mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd)
+        self._rfds.pop(fd, None)
+        self._control(fd)
+
+    def start_transmit(self, fd, data=None):
+        mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)',
+            self, fd, data)
+        self._wfds[fd] = (data or fd, self._generation)
+        self._control(fd)
+
+    def stop_transmit(self, fd):
+        mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd)
+        self._wfds.pop(fd, None)
+        self._control(fd)
+
+    # getattr() defaults of 0 guard against platforms missing the constants.
+    _inmask = (getattr(select, 'EPOLLIN', 0) |
+               getattr(select, 'EPOLLHUP', 0))
+
+    def _poll(self, timeout):
+        # epoll.poll() uses -1 for "no timeout", unlike poll(None).
+        the_timeout = -1
+        if timeout is not None:
+            the_timeout = timeout
+
+        # 32 = maximum number of events fetched per epoll.poll() call.
+        events, _ = mitogen.core.io_op(self._epoll.poll, the_timeout, 32)
+        for fd, event in events:
+            if event & self._inmask:
+                data, gen = self._rfds.get(fd, (None, None))
+                if gen and gen < self._generation:
+                    # Events can still be read for an already-discarded fd.
+                    mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
+                    yield data
+            if event & select.EPOLLOUT:
+                data, gen = self._wfds.get(fd, (None, None))
+                if gen and gen < self._generation:
+                    mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
+                    yield data
+
+
+# 2.4 and 2.5 only had select.select() and select.poll().
+# Pick the most capable supported poller; later entries in the tuple win, so
+# EpollPoller is preferred on Linux and KqueuePoller on BSD/Darwin.
+for _klass in mitogen.core.Poller, PollPoller, KqueuePoller, EpollPoller:
+    if _klass.SUPPORTED:
+        PREFERRED_POLLER = _klass
+
+# For processes that start many threads or connections, it's possible Latch
+# will also get high-numbered FDs, and so select() becomes useless there too.
+# So swap in our favourite poller.
+if PollPoller.SUPPORTED:
+    mitogen.core.Latch.poller_class = PollPoller
+else:
+    mitogen.core.Latch.poller_class = PREFERRED_POLLER
+
+
+class LineLoggingProtocolMixin(object):
+    def __init__(self, **kwargs):
+        super(LineLoggingProtocolMixin, self).__init__(**kwargs)
+        self.logged_lines = []
+        self.logged_partial = None
+
+    def on_line_received(self, line):
+        self.logged_partial = None
+        self.logged_lines.append((mitogen.core.now(), line))
+        self.logged_lines[:] = self.logged_lines[-100:]
+        return super(LineLoggingProtocolMixin, self).on_line_received(line)
+
+    def on_partial_line_received(self, line):
+        self.logged_partial = line
+        return super(LineLoggingProtocolMixin, self).on_partial_line_received(line)
+
+    def on_disconnect(self, broker):
+        if self.logged_partial:
+            self.logged_lines.append((mitogen.core.now(), self.logged_partial))
+            self.logged_partial = None
+        super(LineLoggingProtocolMixin, self).on_disconnect(broker)
+
+
+def get_history(streams):
+    history = []
+    for stream in streams:
+        if stream:
+            history.extend(getattr(stream.protocol, 'logged_lines', []))
+    history.sort()
+
+    s = b('\n').join(h[1] for h in history)
+    return mitogen.core.to_text(s)
+
+
+class RegexProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol):
+    """
+    Implement a delimited protocol where messages matching a set of regular
+    expressions are dispatched to individual handler methods. Input is
+    dispatches using :attr:`PATTERNS` and :attr:`PARTIAL_PATTERNS`, before
+    falling back to :meth:`on_unrecognized_line_received` and
+    :meth:`on_unrecognized_partial_line_received`.
+    """
+    #: A sequence of 2-tuples of the form `(compiled pattern, method)` for
+    #: patterns that should be matched against complete (delimited) messages,
+    #: i.e. full lines.
+    PATTERNS = []
+
+    #: Like :attr:`PATTERNS`, but patterns that are matched against incomplete
+    #: lines.
+    PARTIAL_PATTERNS = []
+
+    def on_line_received(self, line):
+        super(RegexProtocol, self).on_line_received(line)
+        for pattern, func in self.PATTERNS:
+            match = pattern.search(line)
+            if match is not None:
+                return func(self, line, match)
+
+        return self.on_unrecognized_line_received(line)
+
+    def on_unrecognized_line_received(self, line):
+        LOG.debug('%s: (unrecognized): %s',
+            self.stream.name, line.decode('utf-8', 'replace'))
+
+    def on_partial_line_received(self, line):
+        super(RegexProtocol, self).on_partial_line_received(line)
+        LOG.debug('%s: (partial): %s',
+            self.stream.name, line.decode('utf-8', 'replace'))
+        for pattern, func in self.PARTIAL_PATTERNS:
+            match = pattern.search(line)
+            if match is not None:
+                return func(self, line, match)
+
+        return self.on_unrecognized_partial_line_received(line)
+
+    def on_unrecognized_partial_line_received(self, line):
+        LOG.debug('%s: (unrecognized partial): %s',
+            self.stream.name, line.decode('utf-8', 'replace'))
+
+
+class BootstrapProtocol(RegexProtocol):
+    """
+    Respond to stdout of a child during bootstrap. Wait for :attr:`EC0_MARKER`
+    to be written by the first stage to indicate it can receive the bootstrap,
+    then await :attr:`EC1_MARKER` to indicate success, and
+    :class:`MitogenProtocol` can be enabled.
+    """
+    #: Sentinel value emitted by the first stage to indicate it is ready to
+    #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have
+    #: length of at least `max(len('password'), len('debug1:'))`
+    EC0_MARKER = b('MITO000')
+    EC1_MARKER = b('MITO001')
+    EC2_MARKER = b('MITO002')
+
+    def __init__(self, broker):
+        super(BootstrapProtocol, self).__init__()
+        self._writer = mitogen.core.BufferedWriter(broker, self)
+
+    def on_transmit(self, broker):
+        self._writer.on_transmit(broker)
+
+    def _on_ec0_received(self, line, match):
+        LOG.debug('%r: first stage started succcessfully', self)
+        self._writer.write(self.stream.conn.get_preamble())
+
+    def _on_ec1_received(self, line, match):
+        LOG.debug('%r: first stage received mitogen.core source', self)
+
+    def _on_ec2_received(self, line, match):
+        LOG.debug('%r: new child booted successfully', self)
+        self.stream.conn._complete_connection()
+        return False
+
+    def on_unrecognized_line_received(self, line):
+        LOG.debug('%s: stdout: %s', self.stream.name,
+            line.decode('utf-8', 'replace'))
+
+    PATTERNS = [
+        (re.compile(EC0_MARKER), _on_ec0_received),
+        (re.compile(EC1_MARKER), _on_ec1_received),
+        (re.compile(EC2_MARKER), _on_ec2_received),
+    ]
+
+
+class LogProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol):
+    """
+    For "hybrid TTY/socketpair" mode, after connection setup a spare TTY master
+    FD exists that cannot be closed, and to which SSH or sudo may continue
+    writing log messages.
+
+    The descriptor cannot be closed since the UNIX TTY layer sends SIGHUP to
+    processes whose controlling TTY is the slave whose master side was closed.
+    LogProtocol takes over this FD and creates log messages for anything
+    written to it.
+    """
+    def on_line_received(self, line):
+        """
+        Read a line, decode it as UTF-8, and log it.
+        """
+        super(LogProtocol, self).on_line_received(line)
+        LOG.info(u'%s: %s', self.stream.name, line.decode('utf-8', 'replace'))
+
+
+class MitogenProtocol(mitogen.core.MitogenProtocol):
+    """
+    Extend core.MitogenProtocol to cause SHUTDOWN to be sent to the child
+    during graceful shutdown.
+    """
+    def on_shutdown(self, broker):
+        """
+        Respond to the broker's request for the stream to shut down by sending
+        SHUTDOWN to the child.
+        """
+        LOG.debug('%r: requesting child shutdown', self)
+        self._send(
+            mitogen.core.Message(
+                src_id=mitogen.context_id,
+                dst_id=self.remote_id,
+                handle=mitogen.core.SHUTDOWN,
+            )
+        )
+
+
class Options(object):
    """
    Container for the parameters used to construct a :class:`Connection`.
    Values not supplied to the constructor fall back to the class-level
    defaults below.
    """
    #: Optional stream name; when unset, the Connection generates a default.
    name = None

    #: The path to the remote Python interpreter.
    python_path = get_sys_executable()

    #: Maximum time to wait for a connection attempt.
    connect_timeout = 30.0

    #: True to cause context to write verbose /tmp/mitogen.<pid>.log.
    debug = False

    #: True to cause context to write /tmp/mitogen.stats.<pid>.<thread>.log.
    profiling = False

    #: True if unidirectional routing is enabled in the new child.
    unidirectional = False

    #: Passed via Router wrapper methods, must eventually be passed to
    #: ExternalContext.main().
    max_message_size = None

    #: Remote name.
    remote_name = None

    #: Derived from :py:attr:`connect_timeout`; absolute floating point
    #: UNIX timestamp after which the connection attempt should be abandoned.
    connect_deadline = None

    def __init__(self, max_message_size, name=None, remote_name=None,
                 python_path=None, debug=False, connect_timeout=None,
                 profiling=False, unidirectional=False, old_router=None):
        """
        :param int max_message_size:
            Maximum message size; forwarded to ExternalContext.main().
        :param str name:
            Optional stream name.
        :param str remote_name:
            Name embedded in the child's argv; defaults to
            :func:`get_default_remote_name`.
        :param python_path:
            Interpreter path (or argv list) for the child.
        :param float connect_timeout:
            Seconds to wait before abandoning the connection attempt.
        :param old_router:
            Accepted for backwards compatibility; unused.
        :raises ValueError:
            If `remote_name` contains a slash or backslash.
        """
        self.name = name
        # Assigned exactly once; a previous revision redundantly assigned it a
        # second time at the end of the constructor.
        self.max_message_size = max_message_size
        if python_path:
            self.python_path = python_path
        if connect_timeout:
            self.connect_timeout = connect_timeout
        if remote_name is None:
            remote_name = get_default_remote_name()
        # remote_name is substituted into the child's argv0 (see
        # Connection._first_stage); path separators are rejected outright.
        if '/' in remote_name or '\\' in remote_name:
            raise ValueError('remote_name= cannot contain slashes')
        if remote_name:
            self.remote_name = mitogen.core.to_text(remote_name)
        self.debug = debug
        self.profiling = profiling
        self.unidirectional = unidirectional
        self.connect_deadline = mitogen.core.now() + self.connect_timeout
+
+
class Connection(object):
    """
    Manage the lifetime of a set of :class:`Streams <Stream>` connecting to a
    remote Python interpreter, including bootstrap, disconnection, and external
    tool integration.

    Base for streams capable of starting children.
    """
    options_class = Options

    #: The protocol attached to stdio of the child.
    stream_protocol_class = BootstrapProtocol

    #: The protocol attached to stderr of the child.
    diag_protocol_class = LogProtocol

    #: :class:`Process`
    proc = None

    #: :class:`mitogen.core.Stream` with sides connected to stdin/stdout.
    stdio_stream = None

    #: If `proc.stderr` is set, referencing either a plain pipe or the
    #: controlling TTY, this references the corresponding
    #: :class:`LogProtocol`'s stream, allowing it to be disconnected when this
    #: stream is disconnected.
    stderr_stream = None

    #: Function with the semantics of :func:`create_child` used to create the
    #: child process.
    create_child = staticmethod(create_child)

    #: Dictionary of extra kwargs passed to :attr:`create_child`.
    create_child_args = {}

    #: :data:`True` if the remote has indicated that it intends to detach, and
    #: should not be killed on disconnect.
    detached = False

    #: If :data:`True`, indicates the child should not be killed during
    #: graceful detachment, as it is the actual process implementing the child
    #: context. In all other cases, the subprocess is SSH, sudo, or a similar
    #: tool that should be reminded to quit during disconnection.
    child_is_immediate_subprocess = True

    #: Prefix given to default names generated by :meth:`connect`.
    name_prefix = u'local'

    #: :class:`Timer` that runs :meth:`_on_timer_expired` when connection
    #: timeout occurs.
    _timer = None

    #: When disconnection completes, instance of :class:`Reaper` used to wait
    #: on the exit status of the subprocess.
    _reaper = None

    #: On failure, the exception object that should be propagated back to the
    #: user.
    exception = None

    #: Extra text appended to :class:`EofError` if that exception is raised on
    #: a failed connection attempt. May be used in subclasses to hint at common
    #: problems with a particular connection method.
    eof_error_hint = None

    def __init__(self, options, router):
        """
        :param Options options:
            Connection parameters.
        :param router:
            Router the resulting stream is registered with on success.
        """
        #: :class:`Options`
        self.options = options
        self._router = router

    def __repr__(self):
        return 'Connection(%r)' % (self.stdio_stream,)

    # Minimised, gzipped, base64'd and passed to 'python -c'. It forks, dups
    # file descriptor 0 as 100, creates a pipe, then execs a new interpreter
    # with a custom argv.
    #   * Optimized for minimum byte count after minification & compression.
    #   * 'CONTEXT_NAME' and 'PREAMBLE_COMPRESSED_LEN' are substituted with
    #     their respective values.
    #   * CONTEXT_NAME must be prefixed with the name of the Python binary in
    #     order to allow virtualenvs to detect their install prefix.
    #   * For Darwin, OS X installs a craptacular argv0-introspecting Python
    #     version switcher as /usr/bin/python. Override attempts to call it
    #     with an explicit call to python2.7
    #
    # Locals:
    #   R: read side of interpreter stdin.
    #   W: write side of interpreter stdin.
    #   r: read side of core_src FD.
    #   w: write side of core_src FD.
    #   C: the decompressed core source.

    # Final os.close(2) to avoid --py-debug build from corrupting stream with
    # "[1234 refs]" during exit.
    #
    # WARNING: the body of _first_stage() is extracted with
    # inspect.getsource() and transmitted to the remote host (see
    # get_boot_command()); do not add comments inside it, as they would be
    # shipped as part of the bootstrap payload.
    @staticmethod
    def _first_stage():
        R,W=os.pipe()
        r,w=os.pipe()
        if os.fork():
            os.dup2(0,100)
            os.dup2(R,0)
            os.dup2(r,101)
            os.close(R)
            os.close(r)
            os.close(W)
            os.close(w)
            if sys.platform == 'darwin' and sys.executable == '/usr/bin/python':
                sys.executable += sys.version[:3]
            os.environ['ARGV0']=sys.executable
            os.execl(sys.executable,sys.executable+'(mitogen:CONTEXT_NAME)')
        os.write(1,'MITO000\n'.encode())
        C=_(os.fdopen(0,'rb').read(PREAMBLE_COMPRESSED_LEN),'zip')
        fp=os.fdopen(W,'wb',0)
        fp.write(C)
        fp.close()
        fp=os.fdopen(w,'wb',0)
        fp.write(C)
        fp.close()
        os.write(1,'MITO001\n'.encode())
        os.close(2)

    def get_python_argv(self):
        """
        Return the initial argument vector elements necessary to invoke Python,
        by returning a 1-element list containing :attr:`python_path` if it is a
        string, or simply returning it if it is already a list.

        This allows emulation of existing tools where the Python invocation may
        be set to e.g. `['/usr/bin/env', 'python']`.
        """
        if isinstance(self.options.python_path, list):
            return self.options.python_path
        return [self.options.python_path]

    def get_boot_command(self):
        """
        Return the command line used to launch the child interpreter:
        :meth:`get_python_argv` plus a ``-c`` argument that base64/zlib
        decodes and executes the minified :meth:`_first_stage` source, with
        CONTEXT_NAME and PREAMBLE_COMPRESSED_LEN substituted in.
        """
        source = inspect.getsource(self._first_stage)
        # Drop the '@staticmethod' and 'def' lines, then dedent the body.
        source = textwrap.dedent('\n'.join(source.strip().split('\n')[2:]))
        # Swap 4-space indents for tabs, presumably to shrink the payload.
        source = source.replace('    ', '\t')
        source = source.replace('CONTEXT_NAME', self.options.remote_name)
        preamble_compressed = self.get_preamble()
        source = source.replace('PREAMBLE_COMPRESSED_LEN',
                                str(len(preamble_compressed)))
        compressed = zlib.compress(source.encode(), 9)
        encoded = codecs.encode(compressed, 'base64').replace(b('\n'), b(''))
        # We can't use bytes.decode() in 3.x since it was restricted to always
        # return unicode, so codecs.decode() is used instead. In 3.x
        # codecs.decode() requires a bytes object. Since we must be compatible
        # with 2.4 (no bytes literal), an extra .encode() either returns the
        # same str (2.x) or an equivalent bytes (3.x).
        return self.get_python_argv() + [
            '-c',
            'import codecs,os,sys;_=codecs.decode;'
            'exec(_(_("%s".encode(),"base64"),"zip"))' % (encoded.decode(),)
        ]

    def get_econtext_config(self):
        """
        Return the configuration dict embedded in the preamble and ultimately
        passed to ExternalContext.main() in the child: routing IDs, debug and
        profiling flags, module filter lists, and size limits.
        """
        assert self.options.max_message_size is not None
        parent_ids = mitogen.parent_ids[:]
        parent_ids.insert(0, mitogen.context_id)
        return {
            'parent_ids': parent_ids,
            'context_id': self.context.context_id,
            'debug': self.options.debug,
            'profiling': self.options.profiling,
            'unidirectional': self.options.unidirectional,
            'log_level': get_log_level(),
            'whitelist': self._router.get_module_whitelist(),
            'blacklist': self._router.get_module_blacklist(),
            'max_message_size': self.options.max_message_size,
            'version': mitogen.__version__,
        }

    def get_preamble(self):
        """
        Return the bootstrap preamble sent to the child: the shared core
        source (via :func:`get_core_source_partial`) with a suffix appended
        that invokes ExternalContext with this connection's configuration.
        """
        suffix = (
            '\nExternalContext(%r).main()\n' %\
            (self.get_econtext_config(),)
        )
        partial = get_core_source_partial()
        return partial.append(suffix.encode('utf-8'))

    def _get_name(self):
        """
        Called by :meth:`connect` after :attr:`pid` is known. Subclasses can
        override it to specify a default stream name, or set
        :attr:`name_prefix` to generate a default format.
        """
        return u'%s.%s' % (self.name_prefix, self.proc.pid)

    def start_child(self):
        """
        Spawn the child process using :attr:`create_child`, translating any
        :class:`OSError` into a :class:`mitogen.core.StreamError` that
        includes the attempted command line.
        """
        args = self.get_boot_command()
        LOG.debug('command line for %r: %s', self, Argv(args))
        try:
            return self.create_child(args=args, **self.create_child_args)
        except OSError:
            e = sys.exc_info()[1]
            msg = 'Child start failed: %s. Command was: %s' % (e, Argv(args))
            raise mitogen.core.StreamError(msg)

    def _adorn_eof_error(self, e):
        """
        Subclasses may provide additional information in the case of a failed
        connection.
        """
        if self.eof_error_hint:
            e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),)

    def _complete_connection(self):
        """
        Finish the connection attempt: cancel the timeout timer and, if no
        failure was recorded, swap the bootstrap protocol for
        :class:`MitogenProtocol`, register the stream with the router, and
        wake whichever thread is blocked in :meth:`connect`.
        """
        self._timer.cancel()
        if not self.exception:
            mitogen.core.unlisten(self._router.broker, 'shutdown',
                                  self._on_broker_shutdown)
            self._router.register(self.context, self.stdio_stream)
            self.stdio_stream.set_protocol(
                MitogenProtocol(
                    router=self._router,
                    remote_id=self.context.context_id,
                )
            )
            self._router.route_monitor.notice_stream(self.stdio_stream)
        self.latch.put()

    def _fail_connection(self, exc):
        """
        Fail the connection attempt.
        """
        LOG.debug('failing connection %s due to %r',
                  self.stdio_stream and self.stdio_stream.name, exc)
        if self.exception is None:
            # Only the first failure is reported; later ones are ignored.
            self._adorn_eof_error(exc)
            self.exception = exc
            mitogen.core.unlisten(self._router.broker, 'shutdown',
                                  self._on_broker_shutdown)
        for stream in self.stdio_stream, self.stderr_stream:
            if stream and not stream.receive_side.closed:
                stream.on_disconnect(self._router.broker)
        self._complete_connection()

    #: Prefix of the :class:`EofError` message raised when both child streams
    #: close before bootstrap completes.
    eof_error_msg = 'EOF on stream; last 100 lines received:\n'

    def on_stdio_disconnect(self):
        """
        Handle stdio stream disconnection by failing the Connection if the
        stderr stream has already been closed. Otherwise, wait for it to close
        (or timeout), to allow buffered diagnostic logs to be consumed.

        It is normal that when a subprocess aborts, stdio has nothing buffered
        when it is closed, thus signalling readability, causing an empty read
        (interpreted as indicating disconnection) on the next loop iteration,
        even if its stderr pipe has lots of diagnostic logs still buffered in
        the kernel. Therefore we must wait for both pipes to indicate they are
        empty before triggering connection failure.
        """
        stderr = self.stderr_stream
        if stderr is None or stderr.receive_side.closed:
            self._on_streams_disconnected()

    def on_stderr_disconnect(self):
        """
        Inverse of :func:`on_stdio_disconnect`.
        """
        if self.stdio_stream.receive_side.closed:
            self._on_streams_disconnected()

    def _on_streams_disconnected(self):
        """
        When disconnection has been detected for both streams, cancel the
        connection timer, mark the connection failed, and reap the child
        process. Do nothing if the timer has already been cancelled, indicating
        some existing failure has already been noticed.
        """
        if self._timer.active:
            self._timer.cancel()
            self._fail_connection(EofError(
                self.eof_error_msg + get_history(
                    [self.stdio_stream, self.stderr_stream]
                )
            ))

        # A Reaper is created at most once per connection.
        if self._reaper:
            return

        self._reaper = Reaper(
            broker=self._router.broker,
            proc=self.proc,
            kill=not (
                (self.detached and self.child_is_immediate_subprocess) or
                # Avoid killing so child has chance to write cProfile data
                self._router.profiling
            ),
            # Don't delay shutdown waiting for a detached child, since the
            # detached child may expect to live indefinitely after its parent
            # exited.
            wait_on_shutdown=(not self.detached),
        )
        self._reaper.reap()

    def _on_broker_shutdown(self):
        """
        Respond to broker.shutdown() being called by failing the connection
        attempt.
        """
        self._fail_connection(CancelledError(BROKER_SHUTDOWN_MSG))

    def stream_factory(self):
        """
        Construct the stdio stream using :attr:`stream_protocol_class`.
        """
        return self.stream_protocol_class.build_stream(
            broker=self._router.broker,
        )

    def stderr_stream_factory(self):
        """
        Construct the stderr stream using :attr:`diag_protocol_class`.
        """
        return self.diag_protocol_class.build_stream()

    def _setup_stdio_stream(self):
        """
        Build the stdio stream, attach it to the child's stdout/stdin, and
        register it with the broker for receive.
        """
        stream = self.stream_factory()
        stream.conn = self
        stream.name = self.options.name or self._get_name()
        stream.accept(self.proc.stdout, self.proc.stdin)

        mitogen.core.listen(stream, 'disconnect', self.on_stdio_disconnect)
        self._router.broker.start_receive(stream)
        return stream

    def _setup_stderr_stream(self):
        """
        Build the diagnostic stream, attach it to the child's stderr, and
        register it with the broker for receive.
        """
        stream = self.stderr_stream_factory()
        stream.conn = self
        stream.name = self.options.name or self._get_name()
        stream.accept(self.proc.stderr, self.proc.stderr)

        mitogen.core.listen(stream, 'disconnect', self.on_stderr_disconnect)
        self._router.broker.start_receive(stream)
        return stream

    def _on_timer_expired(self):
        """
        Invoked by the broker timer when :attr:`Options.connect_deadline`
        passes; fail the attempt with a :class:`mitogen.core.TimeoutError`.
        """
        self._fail_connection(
            mitogen.core.TimeoutError(
                'Failed to setup connection after %.2f seconds',
                self.options.connect_timeout,
            )
        )

    def _async_connect(self):
        """
        Runs on the broker thread (deferred from :meth:`connect`): install the
        broker shutdown listener and timeout timer, start the child process,
        and wire its pipes up to streams. Failures are routed through
        :meth:`_fail_connection`.
        """
        LOG.debug('creating connection to context %d using %s',
                  self.context.context_id, self.__class__.__module__)
        mitogen.core.listen(self._router.broker, 'shutdown',
                            self._on_broker_shutdown)
        self._timer = self._router.broker.timers.schedule(
            when=self.options.connect_deadline,
            func=self._on_timer_expired,
        )

        try:
            self.proc = self.start_child()
        except Exception:
            LOG.debug('failed to start child', exc_info=True)
            self._fail_connection(sys.exc_info()[1])
            return

        LOG.debug('child for %r started: pid:%r stdin:%r stdout:%r stderr:%r',
                  self, self.proc.pid,
                  self.proc.stdin.fileno(),
                  self.proc.stdout.fileno(),
                  self.proc.stderr and self.proc.stderr.fileno())

        self.stdio_stream = self._setup_stdio_stream()
        if self.context.name is None:
            self.context.name = self.stdio_stream.name
        self.proc.name = self.stdio_stream.name
        if self.proc.stderr:
            self.stderr_stream = self._setup_stderr_stream()

    def connect(self, context):
        """
        Start the connection attempt for `context` on the broker thread and
        block the calling thread until it completes, re-raising any recorded
        connection failure.
        """
        self.context = context
        self.latch = mitogen.core.Latch()
        self._router.broker.defer(self._async_connect)
        self.latch.get()
        if self.exception:
            raise self.exception
+
+
class ChildIdAllocator(object):
    """
    Hand out context IDs drawn from blocks of IDs the master process reserves
    for us on demand.
    """
    def __init__(self, router):
        self.router = router
        self.lock = threading.Lock()
        # Start with an exhausted iterator so the first allocation requests a
        # fresh block from the master.
        self.it = iter(xrange(0))

    def allocate(self):
        """
        Return a new context ID, fetching another block of IDs from the master
        whenever the current block runs out.

        :returns:
            The new context ID.

        .. warning::

            This method is not safe to call from the :class:`Broker` thread, as
            it may block on IO of its own.
        """
        while True:
            with self.lock:
                for allocated in self.it:
                    return allocated

                # Block exhausted; ask the master for a fresh range.
                master = self.router.context_by_id(0)
                start, end = master.send_await(
                    mitogen.core.Message(dst_id=0,
                                         handle=mitogen.core.ALLOCATE_ID)
                )
                self.it = iter(xrange(start, end))
+
+
class CallChain(object):
    """
    Deliver :data:`mitogen.core.CALL_FUNCTION` messages to a target context,
    optionally threading related calls so an exception in an earlier call
    cancels subsequent calls.

    :param mitogen.core.Context context:
        Target context.
    :param bool pipelined:
        Enable pipelining.

    :meth:`call`, :meth:`call_no_reply` and :meth:`call_async`
    normally issue calls and produce responses with no memory of prior
    exceptions. If a call made with :meth:`call_no_reply` fails, the exception
    is logged to the target context's logging framework.

    **Pipelining**

    When pipelining is enabled, if an exception occurs during a call,
    subsequent calls made by the same :class:`CallChain` fail with the same
    exception, including those already in-flight on the network, and no further
    calls execute until :meth:`reset` is invoked.

    No exception is logged for calls made with :meth:`call_no_reply`, instead
    the exception is saved and reported as the result of subsequent
    :meth:`call` or :meth:`call_async` calls.

    Sequences of asynchronous calls can be made without wasting network
    round-trips to discover if prior calls succeed, and chains originating from
    multiple unrelated source contexts may overlap concurrently at a target
    context without interference.

    In this example, 4 calls complete in one round-trip::

        chain = mitogen.parent.CallChain(context, pipelined=True)
        chain.call_no_reply(os.mkdir, '/tmp/foo')

        # If previous mkdir() failed, this never runs:
        chain.call_no_reply(os.mkdir, '/tmp/foo/bar')

        # If either mkdir() failed, this never runs, and the exception is
        # asynchronously delivered to the receiver.
        recv = chain.call_async(subprocess.check_output, '/tmp/foo')

        # If anything so far failed, this never runs, and raises the exception.
        chain.call(do_something)

        # If this code was executed, the exception would also be raised.
        if recv.get().unpickle() == 'baz':
            pass

    When pipelining is enabled, :meth:`reset` must be invoked to ensure any
    exception is discarded, otherwise unbounded memory usage is possible in
    long-running programs. The context manager protocol is supported to ensure
    :meth:`reset` is always invoked::

        with mitogen.parent.CallChain(context, pipelined=True) as chain:
            chain.call_no_reply(...)
            chain.call_no_reply(...)
            chain.call_no_reply(...)
            chain.call(...)

        # chain.reset() automatically invoked.
    """
    def __init__(self, context, pipelined=False):
        """
        :param mitogen.core.Context context:
            Target context.
        :param bool pipelined:
            If :data:`True`, generate a chain ID so related calls share
            failure state; otherwise calls are independent.
        """
        self.context = context
        if pipelined:
            self.chain_id = self.make_chain_id()
        else:
            self.chain_id = None

    @classmethod
    def make_chain_id(cls):
        """
        Generate a chain ID unique across contexts, composed of hostname,
        process ID, thread ID and the current time in microseconds.
        """
        return '%s-%s-%x-%x' % (
            socket.gethostname(),
            os.getpid(),
            thread.get_ident(),
            int(1e6 * mitogen.core.now()),
        )

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.context)

    def __enter__(self):
        """
        Context manager protocol; see class docstring.
        """
        return self

    def __exit__(self, _1, _2, _3):
        """
        Invoke :meth:`reset` on scope exit, regardless of exceptions.
        """
        self.reset()

    def reset(self):
        """
        Instruct the target to forget any related exception.
        """
        if not self.chain_id:
            return

        saved, self.chain_id = self.chain_id, None
        try:
            # chain_id is temporarily cleared so the forget_chain call itself
            # is not issued as part of the chain being forgotten.
            self.call_no_reply(mitogen.core.Dispatcher.forget_chain, saved)
        finally:
            self.chain_id = saved

    # Error text raised by make_msg() for each unsupported callable type.
    closures_msg = (
        'Mitogen cannot invoke closures, as doing so would require '
        'serializing arbitrary program state, and no universal '
        'method exists to recover a reference to them.'
    )

    lambda_msg = (
        'Mitogen cannot invoke anonymous functions, as no universal method '
        'exists to recover a reference to an anonymous function.'
    )

    method_msg = (
        'Mitogen cannot invoke instance methods, as doing so would require '
        'serializing arbitrary program state.'
    )

    def make_msg(self, fn, *args, **kwargs):
        """
        Construct the pickled :data:`mitogen.core.CALL_FUNCTION` message
        describing an invocation of `fn(*args, **kwargs)`.

        :raises TypeError:
            `fn` is a closure, a lambda, or a bound instance method, none of
            which can be named and recovered in the target context.
        """
        if getattr(fn, closure_attr, None) is not None:
            raise TypeError(self.closures_msg)
        if fn.__name__ == '<lambda>':
            raise TypeError(self.lambda_msg)

        if inspect.ismethod(fn):
            # Classmethods are permitted: the owning class can be named.
            im_self = getattr(fn, IM_SELF_ATTR)
            if not inspect.isclass(im_self):
                raise TypeError(self.method_msg)
            klass = mitogen.core.to_text(im_self.__name__)
        else:
            klass = None

        tup = (
            self.chain_id,
            mitogen.core.to_text(fn.__module__),
            klass,
            mitogen.core.to_text(fn.__name__),
            args,
            mitogen.core.Kwargs(kwargs)
        )
        return mitogen.core.Message.pickled(tup,
            handle=mitogen.core.CALL_FUNCTION)

    def call_no_reply(self, fn, *args, **kwargs):
        """
        Like :meth:`call_async`, but do not wait for a return value, and inform
        the target context no reply is expected. If the call fails and
        pipelining is disabled, the exception will be logged to the target
        context's logging framework.
        """
        LOG.debug('starting no-reply function call to %r: %r',
                  self.context.name or self.context.context_id,
                  CallSpec(fn, args, kwargs))
        self.context.send(self.make_msg(fn, *args, **kwargs))

    def call_async(self, fn, *args, **kwargs):
        """
        Arrange for `fn(*args, **kwargs)` to be invoked on the context's main
        thread.

        :param fn:
            A free function in module scope or a class method of a class
            directly reachable from module scope:

            .. code-block:: python

                # mymodule.py

                def my_func():
                    '''A free function reachable as mymodule.my_func'''

                class MyClass:
                    @classmethod
                    def my_classmethod(cls):
                        '''Reachable as mymodule.MyClass.my_classmethod'''

                    def my_instancemethod(self):
                        '''Unreachable: requires a class instance!'''

                    class MyEmbeddedClass:
                        @classmethod
                        def my_classmethod(cls):
                            '''Not directly reachable from module scope!'''

        :param tuple args:
            Function arguments, if any. See :ref:`serialization-rules` for
            permitted types.
        :param dict kwargs:
            Function keyword arguments, if any. See :ref:`serialization-rules`
            for permitted types.
        :returns:
            :class:`mitogen.core.Receiver` configured to receive the result of
            the invocation:

            .. code-block:: python

                recv = context.call_async(subprocess.check_output, 'ls /tmp/')
                try:
                    # Prints output once it is received.
                    msg = recv.get()
                    print(msg.unpickle())
                except mitogen.core.CallError, e:
                    print('Call failed:', str(e))

            Asynchronous calls may be dispatched in parallel to multiple
            contexts and consumed as they complete using
            :class:`mitogen.select.Select`.
        """
        LOG.debug('starting function call to %s: %r',
                  self.context.name or self.context.context_id,
                  CallSpec(fn, args, kwargs))
        return self.context.send_async(self.make_msg(fn, *args, **kwargs))

    def call(self, fn, *args, **kwargs):
        """
        Like :meth:`call_async`, but block until the return value is available.
        Equivalent to::

            call_async(fn, *args, **kwargs).get().unpickle()

        :returns:
            The function's return value.
        :raises mitogen.core.CallError:
            An exception was raised in the remote context during execution.
        """
        receiver = self.call_async(fn, *args, **kwargs)
        # NOTE(review): throw_dead=False appears intended to let dead-context
        # results surface through unpickle() rather than raise here — confirm.
        return receiver.get().unpickle(throw_dead=False)
+
+
class Context(mitogen.core.Context):
    """
    Specialization of :class:`mitogen.core.Context` adding the features needed
    by masters, and by children that later become parents. Where this class is
    required, the target context's router is upgraded at runtime.
    """
    #: Class used to build :attr:`default_call_chain`, the non-pipelined chain
    #: backing :meth:`call`, :meth:`call_async` and :meth:`call_no_reply`.
    call_chain_class = CallChain

    # NOTE(review): assigned externally; appears to record the context this
    # one is reached through — confirm against router code.
    via = None

    def __init__(self, *args, **kwargs):
        super(Context, self).__init__(*args, **kwargs)
        self.default_call_chain = self.call_chain_class(self)

    def __eq__(self, other):
        if not isinstance(other, mitogen.core.Context):
            return False
        return (other.context_id == self.context_id and
                other.router == self.router)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.router, self.context_id))

    def call_async(self, fn, *args, **kwargs):
        """
        Delegate to :meth:`CallChain.call_async` on the default chain.
        """
        return self.default_call_chain.call_async(fn, *args, **kwargs)

    def call(self, fn, *args, **kwargs):
        """
        Delegate to :meth:`CallChain.call` on the default chain.
        """
        return self.default_call_chain.call(fn, *args, **kwargs)

    def call_no_reply(self, fn, *args, **kwargs):
        """
        Delegate to :meth:`CallChain.call_no_reply` on the default chain.
        """
        self.default_call_chain.call_no_reply(fn, *args, **kwargs)

    def shutdown(self, wait=False):
        """
        Ask the context to terminate gracefully by sending it a ``SHUTDOWN``
        message.

        Due to a lack of support for timers, no attempt is made yet to force
        terminate a hung context using this method. This will be fixed shortly.

        :param bool wait:
            If :data:`True`, block the calling thread until the context has
            completely terminated.
        :returns:
            When `wait` is :data:`False`, a :class:`mitogen.core.Latch` whose
            :meth:`get() <mitogen.core.Latch.get>` method returns :data:`None`
            once shutdown completes; its `timeout` parameter may be used to
            implement graceful timeouts. :data:`None` otherwise.
        """
        LOG.debug('%r.shutdown() sending SHUTDOWN', self)
        latch = mitogen.core.Latch()
        mitogen.core.listen(self, 'disconnect', lambda: latch.put(None))
        self.send(mitogen.core.Message(handle=mitogen.core.SHUTDOWN))

        if not wait:
            return latch
        latch.get()
+
+
+class RouteMonitor(object):
+    """
+    Generate and respond to :data:`mitogen.core.ADD_ROUTE` and
+    :data:`mitogen.core.DEL_ROUTE` messages sent to the local context by
+    maintaining a table of available routes, and propagating messages towards
+    parents and siblings as appropriate.
+
+    :class:`RouteMonitor` is responsible for generating routing messages for
+    directly attached children. It learns of new children via
+    :meth:`notice_stream` called by :class:`Router`, and subscribes to their
+    ``disconnect`` event to learn when they disappear.
+
+    In children, constructing this class overwrites the stub
+    :data:`mitogen.core.DEL_ROUTE` handler installed by
+    :class:`mitogen.core.ExternalContext`, which is expected behaviour when a
+child is being upgraded in preparation to become a parent of children of
+    its own.
+
+    By virtue of only being active while responding to messages from a handler,
+    RouteMonitor lives entirely on the broker thread, so its data requires no
+    locking.
+
+    :param mitogen.master.Router router:
+        Router to install handlers on.
+    :param mitogen.core.Context parent:
+        :data:`None` in the master process, or reference to the parent context
+        we should propagate route updates towards.
+    """
+    def __init__(self, router, parent=None):
+        self.router = router
+        self.parent = parent
+        self._log = logging.getLogger('mitogen.route_monitor')
+        #: Mapping of Stream instance to integer context IDs reachable via the
+        #: stream; used to cleanup routes during disconnection.
+        self._routes_by_stream = {}
+        self.router.add_handler(
+            fn=self._on_add_route,
+            handle=mitogen.core.ADD_ROUTE,
+            persist=True,
+            policy=is_immediate_child,
+            overwrite=True,
+        )
+        self.router.add_handler(
+            fn=self._on_del_route,
+            handle=mitogen.core.DEL_ROUTE,
+            persist=True,
+            policy=is_immediate_child,
+            overwrite=True,
+        )
+
    def __repr__(self):
        # Stateless representation; the route table is deliberately omitted.
        return 'RouteMonitor()'
+
+    def _send_one(self, stream, handle, target_id, name):
+        """
+        Compose and send an update message on a stream.
+
+        :param mitogen.core.Stream stream:
+            Stream to send it on.
+        :param int handle:
+            :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+        :param int target_id:
+            ID of the connecting or disconnecting context.
+        :param str name:
+            Context name or :data:`None`.
+        """
+        if not stream:
+            # We may not have a stream during shutdown.
+            return
+
+        data = str(target_id)
+        if name:
+            data = '%s:%s' % (target_id, name)
+        stream.protocol.send(
+            mitogen.core.Message(
+                handle=handle,
+                data=data.encode('utf-8'),
+                dst_id=stream.protocol.remote_id,
+            )
+        )
+
+    def _propagate_up(self, handle, target_id, name=None):
+        """
+        In a non-master context, propagate an update towards the master.
+
+        :param int handle:
+            :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+        :param int target_id:
+            ID of the connecting or disconnecting context.
+        :param str name:
+            For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
+            assigned by its parent. This is used by parents to assign the
+            :attr:`mitogen.core.Context.name` attribute.
+        """
+        if self.parent:
+            stream = self.router.stream_by_id(self.parent.context_id)
+            self._send_one(stream, handle, target_id, name)
+
+    def _propagate_down(self, handle, target_id):
+        """
+        For DEL_ROUTE, we additionally want to broadcast the message to any
+        stream that has ever communicated with the disconnecting ID, so
+        core.py's :meth:`mitogen.core.Router._on_del_route` can turn the
+        message into a disconnect event.
+
+        :param int handle:
+            :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+        :param int target_id:
+            ID of the connecting or disconnecting context.
+        """
+        for stream in self.router.get_streams():
+            if target_id in stream.protocol.egress_ids and (
+                    (self.parent is None) or
+                    (self.parent.context_id != stream.protocol.remote_id)
+                ):
+                self._send_one(stream, mitogen.core.DEL_ROUTE, target_id, None)
+
+    def notice_stream(self, stream):
+        """
+        When this parent is responsible for a new directly connected child
+        stream, we're also responsible for broadcasting
+        :data:`mitogen.core.DEL_ROUTE` upstream when that child disconnects.
+        """
+        self._routes_by_stream[stream] = set([stream.protocol.remote_id])
+        self._propagate_up(mitogen.core.ADD_ROUTE, stream.protocol.remote_id,
+                        stream.name)
+        mitogen.core.listen(
+            obj=stream,
+            name='disconnect',
+            func=lambda: self._on_stream_disconnect(stream),
+        )
+
+    def get_routes(self, stream):
+        """
+        Return the set of context IDs reachable on a stream.
+
+        :param mitogen.core.Stream stream:
+        :returns: set([int])
+        """
+        return self._routes_by_stream.get(stream) or set()
+
+    def _on_stream_disconnect(self, stream):
+        """
+        Respond to disconnection of a local stream by propagating DEL_ROUTE for
+        any contexts we know were attached to it.
+        """
+        # During a stream crash it is possible for the disconnect signal to fire
+        # twice, in which case ignore the second instance.
+        routes = self._routes_by_stream.pop(stream, None)
+        if routes is None:
+            return
+
+        self._log.debug('stream %s is gone; propagating DEL_ROUTE for %r',
+                        stream.name, routes)
+        for target_id in routes:
+            self.router.del_route(target_id)
+            self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
+            self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
+
+            context = self.router.context_by_id(target_id, create=False)
+            if context:
+                mitogen.core.fire(context, 'disconnect')
+
+    def _on_add_route(self, msg):
+        """
+        Respond to :data:`mitogen.core.ADD_ROUTE` by validating the source of
+        the message, updating the local table, and propagating the message
+        upwards.
+        """
+        if msg.is_dead:
+            return
+
+        target_id_s, _, target_name = bytes_partition(msg.data, b(':'))
+        target_name = target_name.decode()
+        target_id = int(target_id_s)
+        self.router.context_by_id(target_id).name = target_name
+        stream = self.router.stream_by_id(msg.src_id)
+        current = self.router.stream_by_id(target_id)
+        if current and current.protocol.remote_id != mitogen.parent_id:
+            self._log.error('Cannot add duplicate route to %r via %r, '
+                            'already have existing route via %r',
+                            target_id, stream, current)
+            return
+
+        self._log.debug('Adding route to %d via %r', target_id, stream)
+        self._routes_by_stream[stream].add(target_id)
+        self.router.add_route(target_id, stream)
+        self._propagate_up(mitogen.core.ADD_ROUTE, target_id, target_name)
+
+    def _on_del_route(self, msg):
+        """
+        Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
+        the message, updating the local table, propagating the message
+        upwards, and downwards towards any stream that ever had a message
+        forwarded from it towards the disconnecting context.
+        """
+        if msg.is_dead:
+            return
+
+        target_id = int(msg.data)
+        registered_stream = self.router.stream_by_id(target_id)
+        if registered_stream is None:
+            return
+
+        stream = self.router.stream_by_id(msg.src_id)
+        if registered_stream != stream:
+            self._log.error('received DEL_ROUTE for %d from %r, expected %r',
+                            target_id, stream, registered_stream)
+            return
+
+        context = self.router.context_by_id(target_id, create=False)
+        if context:
+            self._log.debug('firing local disconnect signal for %r', context)
+            mitogen.core.fire(context, 'disconnect')
+
+        self._log.debug('deleting route to %d via %r', target_id, stream)
+        routes = self._routes_by_stream.get(stream)
+        if routes:
+            routes.discard(target_id)
+
+        self.router.del_route(target_id)
+        if stream.protocol.remote_id != mitogen.parent_id:
+            self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
+        self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
+
+
+class Router(mitogen.core.Router):
+    context_class = Context
+    debug = False
+    profiling = False
+
+    id_allocator = None
+    responder = None
+    log_forwarder = None
+    route_monitor = None
+
+    def upgrade(self, importer, parent):
+        LOG.debug('upgrading %r with capabilities to start new children', self)
+        self.id_allocator = ChildIdAllocator(router=self)
+        self.responder = ModuleForwarder(
+            router=self,
+            parent_context=parent,
+            importer=importer,
+        )
+        self.route_monitor = RouteMonitor(self, parent)
+        self.add_handler(
+            fn=self._on_detaching,
+            handle=mitogen.core.DETACHING,
+            persist=True,
+        )
+
+    def _on_detaching(self, msg):
+        if msg.is_dead:
+            return
+        stream = self.stream_by_id(msg.src_id)
+        if stream.protocol.remote_id != msg.src_id or stream.conn.detached:
+            LOG.warning('bad DETACHING received on %r: %r', stream, msg)
+            return
+        LOG.debug('%r: marking as detached', stream)
+        stream.conn.detached = True
+        msg.reply(None)
+
+    def get_streams(self):
+        """
+        Return an atomic snapshot of all streams in existence at time of call.
+        This is safe to call from any thread.
+        """
+        self._write_lock.acquire()
+        try:
+            return itervalues(self._stream_by_id)
+        finally:
+            self._write_lock.release()
+
+    def disconnect(self, context):
+        """
+        Disconnect a context and forget its stream, assuming the context is
+        directly connected.
+        """
+        stream = self.stream_by_id(context)
+        if stream is None or stream.protocol.remote_id != context.context_id:
+            return
+
+        l = mitogen.core.Latch()
+        mitogen.core.listen(stream, 'disconnect', l.put)
+        def disconnect():
+            LOG.debug('Starting disconnect of %r', stream)
+            stream.on_disconnect(self.broker)
+        self.broker.defer(disconnect)
+        l.get()
+
+    def add_route(self, target_id, stream):
+        """
+        Arrange for messages whose `dst_id` is `target_id` to be forwarded on a
+        directly connected :class:`Stream`. Safe to call from any thread.
+
+        This is called automatically by :class:`RouteMonitor` in response to
+        :data:`mitogen.core.ADD_ROUTE` messages, but remains public while the
+        design has not yet settled, and situations may arise where routing is
+        not fully automatic.
+
+        :param int target_id:
+            Target context ID to add a route for.
+        :param mitogen.core.Stream stream:
+            Stream over which messages to the target should be routed.
+        """
+        LOG.debug('%r: adding route to context %r via %r',
+                  self, target_id, stream)
+        assert isinstance(target_id, int)
+        assert isinstance(stream, mitogen.core.Stream)
+
+        self._write_lock.acquire()
+        try:
+            self._stream_by_id[target_id] = stream
+        finally:
+            self._write_lock.release()
+
+    def del_route(self, target_id):
+        """
+        Delete any route that exists for `target_id`. It is not an error to
+        delete a route that does not currently exist. Safe to call from any
+        thread.
+
+        This is called automatically by :class:`RouteMonitor` in response to
+        :data:`mitogen.core.DEL_ROUTE` messages, but remains public while the
+        design has not yet settled, and situations may arise where routing is
+        not fully automatic.
+
+        :param int target_id:
+            Target context ID to delete route for.
+        """
+        LOG.debug('%r: deleting route to %r', self, target_id)
+        # DEL_ROUTE may be sent by a parent if it knows this context sent
+        # messages to a peer that has now disconnected, to let us raise
+        # 'disconnect' event on the appropriate Context instance. In that case,
+        # we won't have a matching _stream_by_id entry for the disappearing route,
+        # so don't raise an error for a missing key here.
+        self._write_lock.acquire()
+        try:
+            self._stream_by_id.pop(target_id, None)
+        finally:
+            self._write_lock.release()
+
+    def get_module_blacklist(self):
+        if mitogen.context_id == 0:
+            return self.responder.blacklist
+        return self.importer.master_blacklist
+
+    def get_module_whitelist(self):
+        if mitogen.context_id == 0:
+            return self.responder.whitelist
+        return self.importer.master_whitelist
+
+    def allocate_id(self):
+        return self.id_allocator.allocate()
+
+    connection_timeout_msg = u"Connection timed out."
+
+    def _connect(self, klass, **kwargs):
+        context_id = self.allocate_id()
+        context = self.context_class(self, context_id)
+        context.name = kwargs.get('name')
+
+        kwargs['old_router'] = self
+        kwargs['max_message_size'] = self.max_message_size
+        conn = klass(klass.options_class(**kwargs), self)
+        try:
+            conn.connect(context=context)
+        except mitogen.core.TimeoutError:
+            raise mitogen.core.StreamError(self.connection_timeout_msg)
+
+        return context
+
+    def connect(self, method_name, name=None, **kwargs):
+        if name:
+            name = mitogen.core.to_text(name)
+
+        klass = get_connection_class(method_name)
+        kwargs.setdefault(u'debug', self.debug)
+        kwargs.setdefault(u'profiling', self.profiling)
+        kwargs.setdefault(u'unidirectional', self.unidirectional)
+        kwargs.setdefault(u'name', name)
+
+        via = kwargs.pop(u'via', None)
+        if via is not None:
+            return self.proxy_connect(via, method_name,
+                **mitogen.core.Kwargs(kwargs))
+        return self._connect(klass, **mitogen.core.Kwargs(kwargs))
+
+    def proxy_connect(self, via_context, method_name, name=None, **kwargs):
+        resp = via_context.call(_proxy_connect,
+            name=name,
+            method_name=method_name,
+            kwargs=mitogen.core.Kwargs(kwargs),
+        )
+        if resp['msg'] is not None:
+            raise mitogen.core.StreamError(resp['msg'])
+
+        name = u'%s.%s' % (via_context.name, resp['name'])
+        context = self.context_class(self, resp['id'], name=name)
+        context.via = via_context
+        self._write_lock.acquire()
+        try:
+            self._context_by_id[context.context_id] = context
+        finally:
+            self._write_lock.release()
+        return context
+
+    def buildah(self, **kwargs):
+        return self.connect(u'buildah', **kwargs)
+
+    def doas(self, **kwargs):
+        return self.connect(u'doas', **kwargs)
+
+    def docker(self, **kwargs):
+        return self.connect(u'docker', **kwargs)
+
+    def kubectl(self, **kwargs):
+        return self.connect(u'kubectl', **kwargs)
+
+    def fork(self, **kwargs):
+        return self.connect(u'fork', **kwargs)
+
+    def jail(self, **kwargs):
+        return self.connect(u'jail', **kwargs)
+
+    def local(self, **kwargs):
+        return self.connect(u'local', **kwargs)
+
+    def lxc(self, **kwargs):
+        return self.connect(u'lxc', **kwargs)
+
+    def lxd(self, **kwargs):
+        return self.connect(u'lxd', **kwargs)
+
+    def setns(self, **kwargs):
+        return self.connect(u'setns', **kwargs)
+
+    def su(self, **kwargs):
+        return self.connect(u'su', **kwargs)
+
+    def sudo(self, **kwargs):
+        return self.connect(u'sudo', **kwargs)
+
+    def ssh(self, **kwargs):
+        return self.connect(u'ssh', **kwargs)
+
+
+class Reaper(object):
+    """
+    Asynchronous logic for reaping :class:`Process` objects. This is necessary
+    to prevent uncontrolled buildup of zombie processes in long-lived parents
+    that will eventually reach an OS limit, preventing creation of new threads
+    and processes, and to log the exit status of the child in the case of an
+    error.
+
+    To avoid modifying process-global state such as with
+    :func:`signal.set_wakeup_fd` or installing a :data:`signal.SIGCHLD` handler
+    that might interfere with the user's ability to use those facilities,
+    Reaper polls for exit with backoff using timers installed on an associated
+    :class:`Broker`.
+
+    :param mitogen.core.Broker broker:
+        The :class:`Broker` on which to install timers
+    :param mitogen.parent.Process proc:
+        The process to reap.
+    :param bool kill:
+        If :data:`True`, send ``SIGTERM`` and ``SIGKILL`` to the process.
+    :param bool wait_on_shutdown:
+        If :data:`True`, delay :class:`Broker` shutdown if child has not yet
+        exited. If :data:`False` simply forget the child.
+    """
+    #: :class:`Timer` that invokes :meth:`reap` after some polling delay.
+    _timer = None
+
+    def __init__(self, broker, proc, kill, wait_on_shutdown):
+        self.broker = broker
+        self.proc = proc
+        self.kill = kill
+        self.wait_on_shutdown = wait_on_shutdown
+        self._tries = 0
+
+    def _signal_child(self, signum):
+        # For processes like sudo we cannot actually send sudo a signal,
+        # because it is setuid, so this is best-effort only.
+        LOG.debug('%r: sending %s', self.proc, SIGNAL_BY_NUM[signum])
+        try:
+            os.kill(self.proc.pid, signum)
+        except OSError:
+            e = sys.exc_info()[1]
+            if e.args[0] != errno.EPERM:
+                raise
+
+    def _calc_delay(self, count):
+        """
+        Calculate a poll delay given `count` attempts have already been made.
+        These constants have no principle, they just produce rapid but still
+        relatively conservative retries.
+        """
+        delay = 0.05
+        for _ in xrange(count):
+            delay *= 1.72
+        return delay
+
+    def _on_broker_shutdown(self):
+        """
+        Respond to :class:`Broker` shutdown by cancelling the reap timer if
+        :attr:`Router.await_children_at_shutdown` is disabled. Otherwise
+        shutdown is delayed for up to :attr:`Broker.shutdown_timeout`, since
+        subprocesses may have no intention of exiting any time soon.
+        """
+        if not self.wait_on_shutdown:
+            self._timer.cancel()
+
+    def _install_timer(self, delay):
+        new = self._timer is None
+        self._timer = self.broker.timers.schedule(
+            when=mitogen.core.now() + delay,
+            func=self.reap,
+        )
+        if new:
+            mitogen.core.listen(self.broker, 'shutdown',
+                                self._on_broker_shutdown)
+
+    def _remove_timer(self):
+        if self._timer and self._timer.active:
+            self._timer.cancel()
+            mitogen.core.unlisten(self.broker, 'shutdown',
+                                  self._on_broker_shutdown)
+
+    def reap(self):
+        """
+        Reap the child process during disconnection.
+        """
+        status = self.proc.poll()
+        if status is not None:
+            LOG.debug('%r: %s', self.proc, returncode_to_str(status))
+            mitogen.core.fire(self.proc, 'exit')
+            self._remove_timer()
+            return
+
+        self._tries += 1
+        if self._tries > 20:
+            LOG.warning('%r: child will not exit, giving up', self)
+            self._remove_timer()
+            return
+
+        delay = self._calc_delay(self._tries - 1)
+        LOG.debug('%r still running after IO disconnect, recheck in %.03fs',
+                  self.proc, delay)
+        self._install_timer(delay)
+
+        if not self.kill:
+            pass
+        elif self._tries == 2:
+            self._signal_child(signal.SIGTERM)
+        elif self._tries == 6:  # roughly 4 seconds
+            self._signal_child(signal.SIGKILL)
+
+
+class Process(object):
+    """
+    Process objects provide a uniform interface to the :mod:`subprocess` and
+    :mod:`mitogen.fork`. This class is extended by :class:`PopenProcess` and
+    :class:`mitogen.fork.Process`.
+
+    :param int pid:
+        The process ID.
+    :param file stdin:
+        File object attached to standard input.
+    :param file stdout:
+        File object attached to standard output.
+    :param file stderr:
+        File object attached to standard error, or :data:`None`.
+    """
+    #: Name of the process used in logs. Set to the stream/context name by
+    #: :class:`Connection`.
+    name = None
+
+    def __init__(self, pid, stdin, stdout, stderr=None):
+        #: The process ID.
+        self.pid = pid
+        #: File object attached to standard input.
+        self.stdin = stdin
+        #: File object attached to standard output.
+        self.stdout = stdout
+        #: File object attached to standard error.
+        self.stderr = stderr
+
+    def __repr__(self):
+        return '%s %s pid %d' % (
+            type(self).__name__,
+            self.name,
+            self.pid,
+        )
+
+    def poll(self):
+        """
+        Fetch the child process exit status, or :data:`None` if it is still
+        running. This should be overridden by subclasses.
+
+        :returns:
+            Exit status in the style of the :attr:`subprocess.Popen.returncode`
+            attribute, i.e. with signals represented by a negative integer.
+        """
+        raise NotImplementedError()
+
+
+class PopenProcess(Process):
+    """
+    :class:`Process` subclass wrapping a :class:`subprocess.Popen` object.
+
+    :param subprocess.Popen proc:
+        The subprocess.
+    """
+    def __init__(self, proc, stdin, stdout, stderr=None):
+        super(PopenProcess, self).__init__(proc.pid, stdin, stdout, stderr)
+        #: The subprocess.
+        self.proc = proc
+
+    def poll(self):
+        return self.proc.poll()
+
+
+class ModuleForwarder(object):
+    """
+    Respond to :data:`mitogen.core.GET_MODULE` requests in a child by
+    forwarding the request to our parent context, or satisfying the request
+    from our local Importer cache.
+    """
+    def __init__(self, router, parent_context, importer):
+        self.router = router
+        self.parent_context = parent_context
+        self.importer = importer
+        router.add_handler(
+            fn=self._on_forward_module,
+            handle=mitogen.core.FORWARD_MODULE,
+            persist=True,
+            policy=mitogen.core.has_parent_authority,
+        )
+        router.add_handler(
+            fn=self._on_get_module,
+            handle=mitogen.core.GET_MODULE,
+            persist=True,
+            policy=is_immediate_child,
+        )
+
+    def __repr__(self):
+        return 'ModuleForwarder'
+
+    def _on_forward_module(self, msg):
+        if msg.is_dead:
+            return
+
+        context_id_s, _, fullname = bytes_partition(msg.data, b('\x00'))
+        fullname = mitogen.core.to_text(fullname)
+        context_id = int(context_id_s)
+        stream = self.router.stream_by_id(context_id)
+        if stream.protocol.remote_id == mitogen.parent_id:
+            LOG.error('%r: dropping FORWARD_MODULE(%d, %r): no route to child',
+                      self, context_id, fullname)
+            return
+
+        if fullname in stream.protocol.sent_modules:
+            return
+
+        LOG.debug('%r._on_forward_module() sending %r to %r via %r',
+                  self, fullname, context_id, stream.protocol.remote_id)
+        self._send_module_and_related(stream, fullname)
+        if stream.protocol.remote_id != context_id:
+            stream.protocol._send(
+                mitogen.core.Message(
+                    data=msg.data,
+                    handle=mitogen.core.FORWARD_MODULE,
+                    dst_id=stream.protocol.remote_id,
+                )
+            )
+
+    def _on_get_module(self, msg):
+        if msg.is_dead:
+            return
+
+        fullname = msg.data.decode('utf-8')
+        LOG.debug('%r: %s requested by context %d', self, fullname, msg.src_id)
+        callback = lambda: self._on_cache_callback(msg, fullname)
+        self.importer._request_module(fullname, callback)
+
+    def _on_cache_callback(self, msg, fullname):
+        stream = self.router.stream_by_id(msg.src_id)
+        LOG.debug('%r: sending %s to %r', self, fullname, stream)
+        self._send_module_and_related(stream, fullname)
+
+    def _send_module_and_related(self, stream, fullname):
+        tup = self.importer._cache[fullname]
+        for related in tup[4]:
+            rtup = self.importer._cache.get(related)
+            if rtup:
+                self._send_one_module(stream, rtup)
+            else:
+                LOG.debug('%r: %s not in cache (for %s)',
+                          self, related, fullname)
+
+        self._send_one_module(stream, tup)
+
+    def _send_one_module(self, stream, tup):
+        if tup[0] not in stream.protocol.sent_modules:
+            stream.protocol.sent_modules.add(tup[0])
+            self.router._async_route(
+                mitogen.core.Message.pickled(
+                    tup,
+                    dst_id=stream.protocol.remote_id,
+                    handle=mitogen.core.LOAD_MODULE,
+                )
+            )
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/profiler.py b/deployment/lib/mitogen-0.2.9/mitogen/profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbf6086ade6649b9f7a5c1bf76961284213ab028
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/profiler.py
@@ -0,0 +1,164 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+mitogen.profiler
+    Record and report cProfile statistics from a run. Creates one aggregated
+    output file, one aggregate containing only workers, and one for the
+    top-level process.
+
+Usage:
+    mitogen.profiler record <dest_path> <tool> [args ..]
+    mitogen.profiler report <dest_path> [sort_mode]
+    mitogen.profiler stat <sort_mode> <tool> [args ..]
+
+Mode:
+    record: Record a trace.
+    report: Report on a previously recorded trace.
+    stat: Record and report in a single step.
+
+Where:
+    dest_path: Filesystem prefix to write .pstats files to.
+    sort_mode: Sorting mode; defaults to "cumulative". See:
+        https://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
+
+Example:
+    mitogen.profiler record /tmp/mypatch ansible-playbook foo.yml
+    mitogen.profiler report /tmp/mypatch-worker.pstats
+"""
+
+from __future__ import print_function
+import os
+import pstats
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+
+
+def try_merge(stats, path):
+    try:
+        stats.add(path)
+        return True
+    except Exception as e:
+        print('%s failed. Will retry. %s' % (path, e))
+        return False
+
+
+def merge_stats(outpath, inpaths):
+    first, rest = inpaths[0], inpaths[1:]
+    for x in range(1):
+        try:
+            stats = pstats.Stats(first)
+        except EOFError:
+            time.sleep(0.2)
+            continue
+
+        print("Writing %r..." % (outpath,))
+        for path in rest:
+            #print("Merging %r into %r.." % (os.path.basename(path), outpath))
+            for x in range(5):
+                if try_merge(stats, path):
+                    break
+                time.sleep(0.2)
+
+    stats.dump_stats(outpath)
+
+
+def generate_stats(outpath, tmpdir):
+    print('Generating stats..')
+    all_paths = []
+    paths_by_ident = {}
+
+    for name in os.listdir(tmpdir):
+        if name.endswith('-dump.pstats'):
+            ident, _, pid = name.partition('-')
+            path = os.path.join(tmpdir, name)
+            all_paths.append(path)
+            paths_by_ident.setdefault(ident, []).append(path)
+
+    merge_stats('%s-all.pstat' % (outpath,), all_paths)
+    for ident, paths in paths_by_ident.items():
+        merge_stats('%s-%s.pstat' % (outpath, ident), paths)
+
+
+def do_record(tmpdir, path, *args):
+    env = os.environ.copy()
+    fmt = '%(identity)s-%(pid)s.%(now)s-dump.%(ext)s'
+    env['MITOGEN_PROFILING'] = '1'
+    env['MITOGEN_PROFILE_FMT'] = os.path.join(tmpdir, fmt)
+    rc = subprocess.call(args, env=env)
+    generate_stats(path, tmpdir)
+    return rc
+
+
+def do_report(tmpdir, path, sort='cumulative'):
+    stats = pstats.Stats(path).sort_stats(sort)
+    stats.print_stats(100)
+
+
+def do_stat(tmpdir, sort, *args):
+    valid_sorts = pstats.Stats.sort_arg_dict_default
+    if sort not in valid_sorts:
+        sys.stderr.write('Invalid sort %r, must be one of %s\n' %
+                         (sort, ', '.join(sorted(valid_sorts))))
+        sys.exit(1)
+
+    outfile = os.path.join(tmpdir, 'combined')
+    do_record(tmpdir, outfile, *args)
+    aggs = ('app.main', 'mitogen.broker', 'mitogen.child_main',
+            'mitogen.service.pool', 'Strategy', 'WorkerProcess',
+            'all')
+    for agg in aggs:
+        path = '%s-%s.pstat' % (outfile, agg)
+        if os.path.exists(path):
+            print()
+            print()
+            print('------ Aggregation %r ------' % (agg,))
+            print()
+            do_report(tmpdir, path, sort)
+            print()
+
+
+def main():
+    if len(sys.argv) < 2 or sys.argv[1] not in ('record', 'report', 'stat'):
+        sys.stderr.write(__doc__.lstrip())
+        sys.exit(1)
+
+    func = globals()['do_' + sys.argv[1]]
+    tmpdir = tempfile.mkdtemp(prefix='mitogen.profiler')
+    try:
+        sys.exit(func(tmpdir, *sys.argv[2:]) or 0)
+    finally:
+        shutil.rmtree(tmpdir)
+
+if __name__ == '__main__':
+    main()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/select.py b/deployment/lib/mitogen-0.2.9/mitogen/select.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d87574f3e9cc3e844c13ec1e76c2e45aef07661
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/select.py
@@ -0,0 +1,348 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import mitogen.core
+
+
+class Error(mitogen.core.Error):
+    pass
+
+
+class Event(object):
+    """
+    Represents one selected event.
+    """
+    #: The first Receiver or Latch the event traversed.
+    source = None
+
+    #: The :class:`mitogen.core.Message` delivered to a receiver, or the object
+    #: posted to a latch.
+    data = None
+
+
+class Select(object):
+    """
+    Support scatter/gather asynchronous calls and waiting on multiple
+    :class:`receivers <mitogen.core.Receiver>`,
+    :class:`channels <mitogen.core.Channel>`,
+    :class:`latches <mitogen.core.Latch>`, and
+    :class:`sub-selects <Select>`.
+
+    If `oneshot` is :data:`True`, then remove each receiver as it yields a
+    result; since :meth:`__iter__` terminates once the final receiver is
+    removed, this makes it convenient to respond to calls made in parallel::
+
+        total = 0
+        recvs = [c.call_async(long_running_operation) for c in contexts]
+
+        for msg in mitogen.select.Select(recvs):
+            print('Got %s from %s' % (msg, msg.receiver))
+            total += msg.unpickle()
+
+        # Iteration ends when last Receiver yields a result.
+        print('Received total %s from %s receivers' % (total, len(recvs)))
+
+    :class:`Select` may drive a long-running scheduler:
+
+    .. code-block:: python
+
+        with mitogen.select.Select(oneshot=False) as select:
+            while running():
+                for msg in select:
+                    process_result(msg.receiver.context, msg.unpickle())
+                for context, workfunc in get_new_work():
+                    select.add(context.call_async(workfunc))
+
+    :class:`Select` may be nested:
+
+    .. code-block:: python
+
+        subselects = [
+            mitogen.select.Select(get_some_work()),
+            mitogen.select.Select(get_some_work()),
+            mitogen.select.Select([
+                mitogen.select.Select(get_some_work()),
+                mitogen.select.Select(get_some_work())
+            ])
+        ]
+
+        for msg in mitogen.select.Select(selects):
+            print(msg.unpickle())
+
+    :class:`Select` may be used to mix inter-thread and inter-process IO::
+
+        latch = mitogen.core.Latch()
+        start_thread(latch)
+        recv = remote_host.call_async(os.getuid)
+
+        sel = Select([latch, recv])
+        event = sel.get_event()
+        if event.source is latch:
+            # woken by a local thread
+        else:
+            # woken by function call result
+    """
+
+    notify = None
+
+    def __init__(self, receivers=(), oneshot=True):
+        self._receivers = []
+        self._oneshot = oneshot
+        self._latch = mitogen.core.Latch()
+        for recv in receivers:
+            self.add(recv)
+
+    @classmethod
+    def all(cls, receivers):
+        """
+        Take an iterable of receivers and retrieve a :class:`Message
+        <mitogen.core.Message>` from each, returning the result of calling
+        :meth:`Message.unpickle() <mitogen.core.Message.unpickle>` on each in
+        turn. Results are returned in the order they arrived.
+
+        This is sugar for handling batch :meth:`Context.call_async
+        <mitogen.parent.Context.call_async>` invocations:
+
+        .. code-block:: python
+
+            print('Total disk usage: %.02fMiB' % (sum(
+                mitogen.select.Select.all(
+                    context.call_async(get_disk_usage)
+                    for context in contexts
+                ) / 1048576.0
+            ),))
+
+        However, unlike in a naive comprehension such as:
+
+        .. code-block:: python
+
+            recvs = [c.call_async(get_disk_usage) for c in contexts]
+            sum(recv.get().unpickle() for recv in recvs)
+
+        Result processing happens in the order results arrive, rather than the
+        order requests were issued, so :meth:`all` should always be faster.
+        """
+        return list(msg.unpickle() for msg in cls(receivers))
+
+    def _put(self, value):
+        self._latch.put(value)
+        if self.notify:
+            self.notify(self)
+
+    def __bool__(self):
+        """
+        Return :data:`True` if any receivers are registered with this select.
+        """
+        return bool(self._receivers)
+
+    __nonzero__ = __bool__
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, e_type, e_val, e_tb):
+        self.close()
+
+    def iter_data(self):
+        """
+        Yield :attr:`Event.data` until no receivers remain in the select,
+        either because `oneshot` is :data:`True`, or each receiver was
+        explicitly removed via :meth:`remove`.
+
+        :meth:`__iter__` is an alias for :meth:`iter_data`, allowing loops
+        like::
+
+            for msg in Select([recv1, recv2]):
+                print msg.unpickle()
+        """
+        while self._receivers:
+            yield self.get_event().data
+
+    __iter__ = iter_data
+
+    def iter_events(self):
+        """
+        Yield :class:`Event` instances until no receivers remain in the select.
+        """
+        while self._receivers:
+            yield self.get_event()
+
+    loop_msg = 'Adding this Select instance would create a Select cycle'
+
+    def _check_no_loop(self, recv):
+        if recv is self:
+            raise Error(self.loop_msg)
+
+        for recv_ in self._receivers:
+            if recv_ == recv:
+                raise Error(self.loop_msg)
+            if isinstance(recv_, Select):
+                recv_._check_no_loop(recv)
+
+    owned_msg = 'Cannot add: Receiver is already owned by another Select'
+
+    def add(self, recv):
+        """
+        Add a :class:`mitogen.core.Receiver`, :class:`Select` or
+        :class:`mitogen.core.Latch` to the select.
+
+        :raises mitogen.select.Error:
+            An attempt was made to add a :class:`Select` to which this select
+            is indirectly a member of.
+        """
+        if isinstance(recv, Select):
+            recv._check_no_loop(self)
+
+        self._receivers.append(recv)
+        if recv.notify is not None:
+            raise Error(self.owned_msg)
+
+        recv.notify = self._put
+        # After installing the notify function, _put() will potentially begin
+        # receiving calls from other threads immediately, but not for items
+        # they already had buffered. For those we call _put(), possibly
+        # duplicating the effect of other _put() being made concurrently, such
+        # that the Select ends up with more items in its buffer than exist in
+        # the underlying receivers. We handle the possibility of receivers
+        # marked notified yet empty inside Select.get(), so this should be
+        # robust.
+        for _ in range(recv.size()):
+            self._put(recv)
+
+    not_present_msg = 'Instance is not a member of this Select'
+
+    def remove(self, recv):
+        """
+        Remove an object from from the select. Note that if the receiver has
+        notified prior to :meth:`remove`, it will still be returned by a
+        subsequent :meth:`get`. This may change in a future version.
+        """
+        try:
+            if recv.notify != self._put:
+                raise ValueError
+            self._receivers.remove(recv)
+            recv.notify = None
+        except (IndexError, ValueError):
+            raise Error(self.not_present_msg)
+
+    def close(self):
+        """
+        Remove the select's notifier function from each registered receiver,
+        mark the associated latch as closed, and cause any thread currently
+        sleeping in :meth:`get` to be woken with
+        :class:`mitogen.core.LatchError`.
+
+        This is necessary to prevent memory leaks in long-running receivers. It
+        is called automatically when the Python :keyword:`with` statement is
+        used.
+        """
+        for recv in self._receivers[:]:
+            self.remove(recv)
+        self._latch.close()
+
+    def size(self):
+        """
+        Return the number of items currently buffered.
+
+        As with :class:`Queue.Queue`, `0` may be returned even though a
+        subsequent call to :meth:`get` will succeed, since a message may be
+        posted at any moment between :meth:`size` and :meth:`get`.
+
+        As with :class:`Queue.Queue`, `>0` may be returned even though a
+        subsequent call to :meth:`get` will block, since another waiting thread
+        may be woken at any moment between :meth:`size` and :meth:`get`.
+        """
+        return sum(recv.size() for recv in self._receivers)
+
+    def empty(self):
+        """
+        Return `size() == 0`.
+
+        .. deprecated:: 0.2.8
+           Use :meth:`size` instead.
+        """
+        return self._latch.empty()
+
+    empty_msg = 'Cannot get(), Select instance is empty'
+
+    def get(self, timeout=None, block=True):
+        """
+        Call `get_event(timeout, block)` returning :attr:`Event.data` of the
+        first available event.
+        """
+        return self.get_event(timeout, block).data
+
+    def get_event(self, timeout=None, block=True):
+        """
+        Fetch the next available :class:`Event` from any source, or raise
+        :class:`mitogen.core.TimeoutError` if no value is available within
+        `timeout` seconds.
+
+        On success, the message's :attr:`receiver
+        <mitogen.core.Message.receiver>` attribute is set to the receiver.
+
+        :param float timeout:
+            Timeout in seconds.
+        :param bool block:
+            If :data:`False`, immediately raise
+            :class:`mitogen.core.TimeoutError` if the select is empty.
+        :return:
+            :class:`Event`.
+        :raises mitogen.core.TimeoutError:
+            Timeout was reached.
+        :raises mitogen.core.LatchError:
+            :meth:`close` has been called, and the underlying latch is no
+            longer valid.
+        """
+        if not self._receivers:
+            raise Error(self.empty_msg)
+
+        while True:
+            recv = self._latch.get(timeout=timeout, block=block)
+            try:
+                if isinstance(recv, Select):
+                    event = recv.get_event(block=False)
+                else:
+                    event = Event()
+                    event.source = recv
+                    event.data = recv.get(block=False)
+                if self._oneshot:
+                    self.remove(recv)
+                if isinstance(recv, mitogen.core.Receiver):
+                    # Remove in 0.3.x.
+                    event.data.receiver = recv
+                return event
+            except mitogen.core.TimeoutError:
+                # A receiver may have been queued with no result if another
+                # thread drained it before we woke up, or because another
+                # thread drained it between add() calling recv.empty() and
+                # self._put(), or because Select.add() caused duplicate _put()
+                # calls. In this case simply retry.
+                continue
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/service.py b/deployment/lib/mitogen-0.2.9/mitogen/service.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bd64eb0c03ed0d1676ad7354bcd11e391badafc
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/service.py
@@ -0,0 +1,1146 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import grp
+import logging
+import os
+import os.path
+import pprint
+import pwd
+import stat
+import sys
+import threading
+
+import mitogen.core
+import mitogen.select
+from mitogen.core import b
+from mitogen.core import str_rpartition
+
+try:
+    all
+except NameError:
+    def all(it):
+        for elem in it:
+            if not elem:
+                return False
+        return True
+
+
+LOG = logging.getLogger(__name__)
+
+_pool = None
+_pool_pid = None
+#: Serialize pool construction.
+_pool_lock = threading.Lock()
+
+
+if mitogen.core.PY3:
+    def func_code(func):
+        return func.__code__
+    def func_name(func):
+        return func.__name__
+else:
+    def func_code(func):
+        return func.func_code
+    def func_name(func):
+        return func.func_name
+
+
+@mitogen.core.takes_router
+def get_or_create_pool(size=None, router=None):
+    global _pool
+    global _pool_pid
+
+    my_pid = os.getpid()
+    if _pool is None or _pool.closed or my_pid != _pool_pid:
+        # Avoid acquiring heavily contended lock if possible.
+        _pool_lock.acquire()
+        try:
+            if _pool_pid != my_pid:
+                _pool = Pool(
+                    router,
+                    services=[],
+                    size=size or 2,
+                    overwrite=True,
+                    recv=mitogen.core.Dispatcher._service_recv,
+                )
+                # In case of Broker shutdown crash, Pool can cause 'zombie'
+                # processes.
+                mitogen.core.listen(router.broker, 'shutdown',
+                                    lambda: _pool.stop(join=True))
+                _pool_pid = os.getpid()
+        finally:
+            _pool_lock.release()
+
+    return _pool
+
+
+def get_thread_name():
+    return threading.currentThread().getName()
+
+
+def call(service_name, method_name, call_context=None, **kwargs):
+    """
+    Call a service registered with this pool, using the calling thread as a
+    host.
+    """
+    if isinstance(service_name, mitogen.core.BytesType):
+        service_name = service_name.encode('utf-8')
+    elif not isinstance(service_name, mitogen.core.UnicodeType):
+        service_name = service_name.name()  # Service.name()
+
+    if call_context:
+        return call_context.call_service(service_name, method_name, **kwargs)
+    else:
+        pool = get_or_create_pool()
+        invoker = pool.get_invoker(service_name, msg=None)
+        return getattr(invoker.service, method_name)(**kwargs)
+
+
+def validate_arg_spec(spec, args):
+    for name in spec:
+        try:
+            obj = args[name]
+        except KeyError:
+            raise mitogen.core.CallError(
+                'Required argument %r missing.' % (name,)
+            )
+
+        if not isinstance(obj, spec[name]):
+            raise mitogen.core.CallError(
+                'Argument %r type incorrect, got %r, expected %r' % (
+                    name,
+                    type(obj),
+                    spec[name]
+                )
+            )
+
+
+def arg_spec(spec):
+    """
+    Annotate a method as requiring arguments with a specific type. This only
+    validates required arguments. For optional arguments, write a manual check
+    within the function.
+
+    ::
+
+        @mitogen.service.arg_spec({
+            'path': str
+        })
+        def fetch_path(self, path, optional=None):
+            ...
+
+    :param dict spec:
+        Mapping from argument name to expected type.
+    """
+    def wrapper(func):
+        func.mitogen_service__arg_spec = spec
+        return func
+    return wrapper
+
+
+def expose(policy):
+    """
+    Annotate a method to permit access to contexts matching an authorization
+    policy. The annotation may be specified multiple times. Methods lacking any
+    authorization policy are not accessible.
+
+    ::
+
+        @mitogen.service.expose(policy=mitogen.service.AllowParents())
+        def unsafe_operation(self):
+            ...
+
+    :param mitogen.service.Policy policy:
+        The policy to require.
+    """
+    def wrapper(func):
+        func.mitogen_service__policies = (
+            [policy] +
+            getattr(func, 'mitogen_service__policies', [])
+        )
+        return func
+    return wrapper
+
+
+def no_reply():
+    """
+    Annotate a method as one that does not generate a response. Messages sent
+    by the method are done so explicitly. This can be used for fire-and-forget
+    endpoints where the requestee never receives a reply.
+    """
+    def wrapper(func):
+        func.mitogen_service__no_reply = True
+        return func
+    return wrapper
+
+
+class Error(Exception):
+    """
+    Raised when an error occurs configuring a service or pool.
+    """
+    pass  # cope with minify_source() bug.
+
+
+class Policy(object):
+    """
+    Base security policy.
+    """
+    def is_authorized(self, service, msg):
+        raise NotImplementedError()
+
+
+class AllowAny(Policy):
+    def is_authorized(self, service, msg):
+        return True
+
+
+class AllowParents(Policy):
+    def is_authorized(self, service, msg):
+        return (msg.auth_id in mitogen.parent_ids or
+                msg.auth_id == mitogen.context_id)
+
+
+class Activator(object):
+    """
+    """
+    def is_permitted(self, mod_name, class_name, msg):
+        return mitogen.core.has_parent_authority(msg)
+
+    not_active_msg = (
+        'Service %r is not yet activated in this context, and the '
+        'caller is not privileged, therefore autoactivation is disabled.'
+    )
+
+    def activate(self, pool, service_name, msg):
+        mod_name, _, class_name = str_rpartition(service_name, '.')
+        if msg and not self.is_permitted(mod_name, class_name, msg):
+            raise mitogen.core.CallError(self.not_active_msg, service_name)
+
+        module = mitogen.core.import_module(mod_name)
+        klass = getattr(module, class_name)
+        service = klass(router=pool.router)
+        pool.add(service)
+        return service
+
+
+class Invoker(object):
+    def __init__(self, service):
+        self.service = service
+
+    def __repr__(self):
+        return '%s(%s)' % (type(self).__name__, self.service)
+
+    unauthorized_msg = (
+        'Caller is not authorized to invoke %r of service %r'
+    )
+
+    def _validate(self, method_name, kwargs, msg):
+        method = getattr(self.service, method_name, None)
+        if method is None:
+            raise mitogen.core.CallError('No such method: %r', method_name)
+
+        policies = getattr(method, 'mitogen_service__policies', None)
+        if not policies:
+            raise mitogen.core.CallError('Method has no policies set.')
+
+        if msg is not None:
+            if not all(p.is_authorized(self.service, msg) for p in policies):
+                raise mitogen.core.CallError(
+                    self.unauthorized_msg,
+                    method_name,
+                    self.service.name()
+                )
+
+        required = getattr(method, 'mitogen_service__arg_spec', {})
+        validate_arg_spec(required, kwargs)
+
+    def _invoke(self, method_name, kwargs, msg):
+        method = getattr(self.service, method_name)
+        if 'msg' in func_code(method).co_varnames:
+            kwargs['msg'] = msg  # TODO: hack
+
+        no_reply = getattr(method, 'mitogen_service__no_reply', False)
+        ret = None
+        try:
+            ret = method(**kwargs)
+            if no_reply:
+                return Service.NO_REPLY
+            return ret
+        except Exception:
+            if no_reply:
+                LOG.exception('While calling no-reply method %s.%s',
+                              self.service.name(),
+                              func_name(method))
+            else:
+                raise
+
+    def invoke(self, method_name, kwargs, msg):
+        self._validate(method_name, kwargs, msg)
+        response = self._invoke(method_name, kwargs, msg)
+        if response is not Service.NO_REPLY:
+            msg.reply(response)
+
+
+class SerializedInvoker(Invoker):
+    def __init__(self, **kwargs):
+        super(SerializedInvoker, self).__init__(**kwargs)
+        self._lock = threading.Lock()
+        self._queue = []
+        self._running = False
+
+    def _pop(self):
+        self._lock.acquire()
+        try:
+            try:
+                return self._queue.pop(0)
+            except IndexError:
+                self._running = False
+        finally:
+            self._lock.release()
+
+    def _run(self):
+        while True:
+            tup = self._pop()
+            if tup is None:
+                return
+            method_name, kwargs, msg = tup
+            try:
+                super(SerializedInvoker, self).invoke(method_name, kwargs, msg)
+            except mitogen.core.CallError:
+                e = sys.exc_info()[1]
+                LOG.warning('%r: call error: %s: %s', self, msg, e)
+                msg.reply(e)
+            except Exception:
+                LOG.exception('%r: while invoking %s()', self, method_name)
+                msg.reply(mitogen.core.Message.dead())
+
+    def invoke(self, method_name, kwargs, msg):
+        self._lock.acquire()
+        try:
+            self._queue.append((method_name, kwargs, msg))
+            first = not self._running
+            self._running = True
+        finally:
+            self._lock.release()
+
+        if first:
+            self._run()
+        return Service.NO_REPLY
+
+
+class DeduplicatingInvoker(Invoker):
+    """
+    A service that deduplicates and caches expensive responses. Requests are
+    deduplicated according to a customizable key, and the single expensive
+    response is broadcast to all requestors.
+
+    A side effect of this class is that processing of the single response is
+    always serialized according to the result of :py:meth:`key_from_request`.
+
+    Only one pool thread is blocked during generation of the response,
+    regardless of the number of requestors.
+    """
+    def __init__(self, service):
+        super(DeduplicatingInvoker, self).__init__(service)
+        self._responses = {}
+        self._waiters = {}
+        self._lock = threading.Lock()
+
+    def key_from_request(self, method_name, kwargs):
+        """
+        Generate a deduplication key from the request. The default
+        implementation returns a string based on a stable representation of the
+        input dictionary generated by :py:func:`pprint.pformat`.
+        """
+        return pprint.pformat((method_name, kwargs))
+
+    def get_response(self, args):
+        raise NotImplementedError()
+
+    def _produce_response(self, key, response):
+        self._lock.acquire()
+        try:
+            assert key not in self._responses
+            assert key in self._waiters
+            self._responses[key] = response
+            for msg in self._waiters.pop(key):
+                msg.reply(response)
+        finally:
+            self._lock.release()
+
+    def _invoke(self, method_name, kwargs, msg):
+        key = self.key_from_request(method_name, kwargs)
+        self._lock.acquire()
+        try:
+            if key in self._responses:
+                return self._responses[key]
+
+            if key in self._waiters:
+                self._waiters[key].append(msg)
+                return Service.NO_REPLY
+
+            self._waiters[key] = [msg]
+        finally:
+            self._lock.release()
+
+        # I'm the unlucky thread that must generate the response.
+        try:
+            response = getattr(self, method_name)(**kwargs)
+            self._produce_response(key, response)
+        except mitogen.core.CallError:
+            e = sys.exc_info()[1]
+            self._produce_response(key, e)
+        except Exception:
+            e = sys.exc_info()[1]
+            self._produce_response(key, mitogen.core.CallError(e))
+
+        return Service.NO_REPLY
+
+
+class Service(object):
+    #: Sentinel object to suppress reply generation, since returning
+    #: :data:`None` will trigger a response message containing the pickled
+    #: :data:`None`.
+    NO_REPLY = object()
+
+    invoker_class = Invoker
+
+    @classmethod
+    def name(cls):
+        return u'%s.%s' % (cls.__module__, cls.__name__)
+
+    def __init__(self, router):
+        self.router = router
+        self.select = mitogen.select.Select()
+
+    def __repr__(self):
+        return '%s()' % (self.__class__.__name__,)
+
+    def on_message(self, event):
+        """
+        Called when a message arrives on any of :attr:`select`'s registered
+        receivers.
+
+        :param mitogen.select.Event event:
+        """
+        pass
+
+    def on_shutdown(self):
+        """
+        Called by Pool.shutdown() once the last worker thread has exitted.
+        """
+        pass
+
+
+class Pool(object):
+    """
+    Manage a pool of at least one thread that will be used to process messages
+    for a collection of services.
+
+    Internally this is implemented by subscribing every :py:class:`Service`'s
+    :py:class:`mitogen.core.Receiver` using a single
+    :py:class:`mitogen.select.Select`, then arranging for every thread to
+    consume messages delivered to that select.
+
+    In this way the threads are fairly shared by all available services, and no
+    resources are dedicated to a single idle service.
+
+    There is no penalty for exposing large numbers of services; the list of
+    exposed services could even be generated dynamically in response to your
+    program's configuration or its input data.
+
+    :param mitogen.core.Router router:
+        :class:`mitogen.core.Router` to listen for
+        :data:`mitogen.core.CALL_SERVICE` messages.
+    :param list services:
+        Initial list of services to register.
+    :param mitogen.core.Receiver recv:
+        :data:`mitogen.core.CALL_SERVICE` receiver to reuse. This is used by
+        :func:`get_or_create_pool` to hand off a queue of messages from the
+        Dispatcher stub handler while avoiding a race.
+    """
+    activator_class = Activator
+
+    def __init__(self, router, services=(), size=1, overwrite=False,
+                 recv=None):
+        self.router = router
+        self._activator = self.activator_class()
+        self._ipc_latch = mitogen.core.Latch()
+        self._receiver = mitogen.core.Receiver(
+            router=router,
+            handle=mitogen.core.CALL_SERVICE,
+            overwrite=overwrite,
+        )
+
+        self._select = mitogen.select.Select(oneshot=False)
+        self._select.add(self._receiver)
+        self._select.add(self._ipc_latch)
+        #: Serialize service construction.
+        self._lock = threading.Lock()
+        self._func_by_source = {
+            self._receiver: self._on_service_call,
+            self._ipc_latch: self._on_ipc_latch,
+        }
+        self._invoker_by_name = {}
+
+        if recv is not None:
+            # When inheriting from mitogen.core.Dispatcher, we must remove its
+            # stub notification function before adding it to our Select. We
+            # always overwrite this receiver since the standard service.Pool
+            # handler policy differs from the one inherited from
+            # core.Dispatcher.
+            recv.notify = None
+            self._select.add(recv)
+            self._func_by_source[recv] = self._on_service_call
+
+        for service in services:
+            self.add(service)
+        self._py_24_25_compat()
+        self._threads = []
+        for x in range(size):
+            name = 'mitogen.Pool.%04x.%d' % (id(self) & 0xffff, x,)
+            thread = threading.Thread(
+                name=name,
+                target=mitogen.core._profile_hook,
+                args=('mitogen.service.pool', self._worker_main),
+            )
+            thread.start()
+            self._threads.append(thread)
+        LOG.debug('%r: initialized', self)
+
+    def _py_24_25_compat(self):
+        if sys.version_info < (2, 6):
+            # import_module() is used to avoid dep scanner sending mitogen.fork
+            # to all mitogen.service importers.
+            os_fork = mitogen.core.import_module('mitogen.os_fork')
+            os_fork._notice_broker_or_pool(self)
+
+    @property
+    def size(self):
+        return len(self._threads)
+
+    def add(self, service):
+        name = service.name()
+        if name in self._invoker_by_name:
+            raise Error('service named %r already registered' % (name,))
+        assert service.select not in self._func_by_source
+        invoker = service.invoker_class(service=service)
+        self._invoker_by_name[name] = invoker
+        self._func_by_source[service.select] = service.on_message
+
+    closed = False
+
+    def stop(self, join=True):
+        self.closed = True
+        self._receiver.close()
+        self._select.close()
+        if join:
+            self.join()
+
+    def join(self):
+        for th in self._threads:
+            th.join()
+        for invoker in self._invoker_by_name.values():
+            invoker.service.on_shutdown()
+
    def get_invoker(self, name, msg):
        """
        Return the invoker for the service named `name`, activating the
        service on demand via the pool's activator when it is not yet
        registered.

        :param str name:
            Service name.
        :param mitogen.core.Message msg:
            Message that triggered the lookup; passed to the activator so it
            can authorize the activation. May be :data:`None`.
        :returns:
            The service's invoker instance.
        """
        invoker = self._invoker_by_name.get(name)
        if invoker is None:
            # Double-checked locking: skip the lock on the common
            # already-activated path, then re-check under the lock so two
            # racing threads cannot both activate the same service.
            self._lock.acquire()
            try:
                invoker = self._invoker_by_name.get(name)
                if not invoker:
                    service = self._activator.activate(self, name, msg)
                    invoker = service.invoker_class(service=service)
                    self._invoker_by_name[name] = invoker
            finally:
                self._lock.release()

        return invoker
+
+    def get_service(self, name):
+        invoker = self.get_invoker(name, None)
+        return invoker.service
+
+    def _validate(self, msg):
+        tup = msg.unpickle(throw=False)
+        if not (isinstance(tup, tuple) and
+                len(tup) == 3 and
+                isinstance(tup[0], mitogen.core.AnyTextType) and
+                isinstance(tup[1], mitogen.core.AnyTextType) and
+                isinstance(tup[2], dict)):
+            raise mitogen.core.CallError('Invalid message format.')
+
+    def defer(self, func, *args, **kwargs):
+        """
+        Arrange for `func(*args, **kwargs)` to be invoked in the context of a
+        service pool thread.
+        """
+        self._ipc_latch.put(lambda: func(*args, **kwargs))
+
    def _on_ipc_latch(self, event):
        # Events arriving via the IPC latch are callables queued by defer();
        # simply execute the callable on this worker thread.
        event.data()
+
    def _on_service_call(self, event):
        """
        Dispatch one CALL_SERVICE message: validate the payload, look up
        (activating on demand) the invoker for the named service, and invoke
        the requested method. Any failure is converted into a CallError
        reply so the remote caller unblocks.
        """
        msg = event.data
        service_name = None
        method_name = None
        try:
            self._validate(msg)
            service_name, method_name, kwargs = msg.unpickle()
            invoker = self.get_invoker(service_name, msg)
            return invoker.invoke(method_name, kwargs, msg)
        except mitogen.core.CallError:
            # sys.exc_info() is used instead of "except X as e" for
            # Python 2.4/2.5 compatibility.
            e = sys.exc_info()[1]
            LOG.warning('%r: call error: %s: %s', self, msg, e)
            msg.reply(e)
        except Exception:
            LOG.exception('%r: while invoking %r of %r',
                          self, method_name, service_name)
            e = sys.exc_info()[1]
            msg.reply(mitogen.core.CallError(e))
+
    def _worker_run(self):
        """
        Worker thread main loop: pull events from the shared select and route
        each to the handler registered for its source receiver in
        _func_by_source. LatchError and ChannelError indicate pool shutdown
        and cause the loop to exit.
        """
        while not self.closed:
            try:
                event = self._select.get_event()
            except mitogen.core.LatchError:
                LOG.debug('thread %s exiting gracefully', get_thread_name())
                return
            except mitogen.core.ChannelError:
                LOG.debug('thread %s exiting with error: %s',
                          get_thread_name(), sys.exc_info()[1])
                return

            func = self._func_by_source[event.source]
            try:
                func(event)
            except Exception:
                # A crashing handler must not take down the worker thread.
                LOG.exception('While handling %r using %r', event.data, func)
+
    def _worker_main(self):
        """
        Thread entry point: run _worker_run(), logging any crash before
        re-raising so the interpreter's default handler also reports it.
        """
        try:
            self._worker_run()
        except Exception:
            LOG.exception('%r: worker %r crashed', self, get_thread_name())
            raise
+
    def __repr__(self):
        """Short description including pool identity, size and calling thread."""
        return 'Pool(%04x, size=%d, th=%r)' % (
            id(self) & 0xffff,
            len(self._threads),
            get_thread_name(),
        )
+
+
class FileStreamState(object):
    """
    Per-stream transfer bookkeeping used by :class:`FileService`. One
    instance exists for each physical stream with at least one transfer.
    """
    def __init__(self):
        #: List of [(Sender, file object)]
        self.jobs = []
        # NOTE(review): not referenced by any code visible here — presumably
        # vestigial or used elsewhere; confirm before removing.
        self.completing = {}
        #: In-flight byte count.
        self.unacked = 0
        #: Lock guarding jobs/unacked for this stream.
        self.lock = threading.Lock()
+
+
class PushFileService(Service):
    """
    Push-based file service. Files are delivered and cached in RAM, sent
    recursively from parent to child. A child that requests a file via
    :meth:`get` will block until it has been delivered by a parent.

    This service will eventually be merged into FileService.
    """
    def __init__(self, **kwargs):
        super(PushFileService, self).__init__(**kwargs)
        # Guards _cache and _waiters.
        self._lock = threading.Lock()
        # Mapping of path -> delivered file content.
        self._cache = {}
        # Mapping of path -> list of callbacks to run once the path arrives.
        self._waiters = {}
        # Mapping of stream -> set of paths already sent down that stream.
        self._sent_by_stream = {}

    def get(self, path):
        """
        Fetch a file from the cache, blocking until some parent delivers it
        via store_and_forward() if it is not yet present.
        """
        assert isinstance(path, mitogen.core.UnicodeType)
        self._lock.acquire()
        try:
            if path in self._cache:
                return self._cache[path]
            # Not yet delivered: register a waiter callback that wakes this
            # thread when store_and_forward() caches the path.
            latch = mitogen.core.Latch()
            waiters = self._waiters.setdefault(path, [])
            waiters.append(lambda: latch.put(None))
        finally:
            self._lock.release()

        LOG.debug('%r.get(%r) waiting for uncached file to arrive', self, path)
        latch.get()
        LOG.debug('%r.get(%r) -> %r', self, path, self._cache[path])
        return self._cache[path]

    def _forward(self, context, path):
        # Push `path` one hop toward `context`: ask the first child on the
        # route either to simply forward it (if that stream has already seen
        # the bytes) or to store-and-forward them.
        stream = self.router.stream_by_id(context.context_id)
        child = self.router.context_by_id(stream.protocol.remote_id)
        sent = self._sent_by_stream.setdefault(stream, set())
        if path in sent:
            if child.context_id != context.context_id:
                LOG.debug('requesting %s forward small file to %s: %s',
                          child, context, path)
                child.call_service_async(
                    service_name=self.name(),
                    method_name='forward',
                    path=path,
                    context=context
                ).close()
        else:
            LOG.debug('requesting %s cache and forward small file to %s: %s',
                      child, context, path)
            child.call_service_async(
                service_name=self.name(),
                method_name='store_and_forward',
                path=path,
                data=self._cache[path],
                context=context
            ).close()
            sent.add(path)

    @expose(policy=AllowParents())
    @arg_spec({
        'context': mitogen.core.Context,
        'paths': list,
        'modules': list,
    })
    def propagate_paths_and_modules(self, context, paths, modules):
        """
        One size fits all method to ensure a target context has been preloaded
        with a set of small files and Python modules.
        """
        for path in paths:
            self.propagate_to(context, mitogen.core.to_text(path))
        #self.router.responder.forward_modules(context, modules) TODO

    @expose(policy=AllowParents())
    @arg_spec({
        'context': mitogen.core.Context,
        'path': mitogen.core.FsPathTypes,
    })
    def propagate_to(self, context, path):
        """
        Ensure `path` is cached locally (reading it from disk on first use),
        then forward it toward `context`.
        """
        # NOTE(review): _cache is read/written here without holding _lock,
        # unlike get()/store_and_forward() — presumably safe under the
        # service pool's dispatch model; confirm before relying on it.
        if path not in self._cache:
            LOG.debug('caching small file %s', path)
            fp = open(path, 'rb')
            try:
                self._cache[path] = mitogen.core.Blob(fp.read())
            finally:
                fp.close()
        self._forward(context, path)

    @expose(policy=AllowParents())
    @no_reply()
    @arg_spec({
        'path': mitogen.core.UnicodeType,
        'data': mitogen.core.Blob,
        'context': mitogen.core.Context,
    })
    def store_and_forward(self, path, data, context):
        """
        Cache `data` as the content of `path`, continue forwarding toward
        `context` when it is not this process, and wake any local waiters.
        """
        LOG.debug('%r.store_and_forward(%r, %r, %r) %r',
                  self, path, data, context,
                  get_thread_name())
        self._lock.acquire()
        try:
            self._cache[path] = data
            waiters = self._waiters.pop(path, [])
        finally:
            self._lock.release()

        if context.context_id != mitogen.context_id:
            self._forward(context, path)
        # Run waiter callbacks outside the lock: they may be get() latch
        # wakeups or forward() closures queued before the file arrived.
        for callback in waiters:
            callback()

    @expose(policy=AllowParents())
    @no_reply()
    @arg_spec({
        'path': mitogen.core.FsPathTypes,
        'context': mitogen.core.Context,
    })
    def forward(self, path, context):
        """
        Forward the already-cached `path` toward `context`, or queue the
        forward to run once the file arrives via store_and_forward().
        """
        LOG.debug('%r.forward(%r, %r)', self, path, context)
        func = lambda: self._forward(context, path)

        self._lock.acquire()
        try:
            if path in self._cache:
                func()
            else:
                LOG.debug('%r: %r not cached yet, queueing', self, path)
                self._waiters.setdefault(path, []).append(func)
        finally:
            self._lock.release()
+
+
class FileService(Service):
    """
    Streaming file server, used to serve small and huge files alike. Paths must
    be registered by a trusted context before they will be served to a child.

    Transfers are divided among the physical streams that connect external
    contexts, ensuring each stream never has excessive data buffered in RAM,
    while still maintaining enough to fully utilize available bandwidth. This
    is achieved by making an initial bandwidth assumption, enqueueing enough
    chunks to fill that assumed pipe, then responding to delivery
    acknowledgements from the receiver by scheduling new chunks.

    Transfers proceed one-at-a-time per stream. When multiple contexts exist on
    a stream (e.g. one is the SSH account, another is a sudo account, and a
    third is a proxied SSH connection), each request is satisfied in turn
    before subsequent requests start flowing. This ensures when a stream is
    contended, priority is given to completing individual transfers rather than
    potentially aborting many partial transfers, causing the bandwidth to be
    wasted.

    Theory of operation:
        1. Trusted context (i.e. WorkerProcess) calls register(), making a
           file available to any untrusted context.
        2. Requestee context creates a mitogen.core.Receiver() to receive
           chunks, then calls fetch(path, recv.to_sender()), to set up the
           transfer.
        3. fetch() replies to the call with the file's metadata, then
           schedules an initial burst up to the window size limit (1MiB).
        4. Chunks begin to arrive in the requestee, which calls acknowledge()
           for each 128KiB received.
        5. The acknowledge() call arrives at FileService, which schedules a new
           chunk to refill the drained window back to the size limit.
        6. When the last chunk has been pumped for a single transfer,
           Sender.close() is called causing the receive loop in
           target.py::_get_file() to exit, allowing that code to compare the
           transferred size with the total file size from the metadata.
        7. If the sizes mismatch, _get_file()'s caller is informed which will
           discard the result and log/raise an error.

    Shutdown:
        1. process.py calls service.Pool.shutdown(), which arranges for the
           service pool threads to exit and be joined, guaranteeing no new
           requests can arrive, before calling Service.on_shutdown() for each
           registered service.
        2. FileService.on_shutdown() walks every in-progress transfer and calls
           Sender.close(), causing Receiver loops in the requestees to exit
           early. The size check fails and any partially downloaded file is
           discarded.
        3. Control exits _get_file() in every target, and graceful shutdown can
           proceed normally, without the associated thread needing to be
           forcefully killed.
    """
    unregistered_msg = 'Path %r is not registered with FileService.'
    context_mismatch_msg = 'sender= kwarg context must match requestee context'

    #: Burst size. With 1MiB and 10ms RTT max throughput is 100MiB/sec, which
    #: is 5x what SSH can handle on a 2011 era 2.4Ghz Core i5.
    window_size_bytes = 1048576

    def __init__(self, router):
        super(FileService, self).__init__(router)
        #: Set of registered paths.
        self._paths = set()
        #: Set of registered directory prefixes.
        self._prefixes = set()
        #: Mapping of Stream->FileStreamState.
        self._state_by_stream = {}

    def _name_or_none(self, func, n, attr):
        """
        Return ``getattr(func(n), attr)``, or :data:`None` when the lookup
        raises KeyError (e.g. a uid/gid with no account database entry).
        """
        try:
            return getattr(func(n), attr)
        except KeyError:
            return None

    @expose(policy=AllowParents())
    @arg_spec({
        'path': mitogen.core.FsPathTypes,
    })
    def register(self, path):
        """
        Authorize a path for access by children. Repeat calls with the same
        path has no effect.

        :param str path:
            File path.
        """
        if path not in self._paths:
            LOG.debug('%r: registering %r', self, path)
            self._paths.add(path)

    @expose(policy=AllowParents())
    @arg_spec({
        'path': mitogen.core.FsPathTypes,
    })
    def register_prefix(self, path):
        """
        Authorize a path and any subpaths for access by children. Repeat calls
        with the same path has no effect.

        :param str path:
            File path.
        """
        if path not in self._prefixes:
            LOG.debug('%r: registering prefix %r', self, path)
            self._prefixes.add(path)

    def _generate_stat(self, path):
        """
        Return the metadata dict served by :meth:`fetch` for `path`, raising
        IOError if it is not a regular file.
        """
        st = os.stat(path)
        if not stat.S_ISREG(st.st_mode):
            raise IOError('%r is not a regular file.' % (path,))

        return {
            u'size': st.st_size,
            u'mode': st.st_mode,
            # Fix: resolve the file's actual owner/group. Previously the
            # constant 0 was passed, so every file reported uid/gid 0's names.
            u'owner': self._name_or_none(pwd.getpwuid, st.st_uid, 'pw_name'),
            u'group': self._name_or_none(grp.getgrgid, st.st_gid, 'gr_name'),
            u'mtime': float(st.st_mtime),  # Python 2.4 uses int.
            u'atime': float(st.st_atime),  # Python 2.4 uses int.
        }

    def on_shutdown(self):
        """
        Respond to shutdown by sending close() to every target, allowing their
        receive loop to exit and clean up gracefully.
        """
        LOG.debug('%r.on_shutdown()', self)
        for stream, state in self._state_by_stream.items():
            state.lock.acquire()
            try:
                for sender, fp in reversed(state.jobs):
                    sender.close()
                    fp.close()
                    state.jobs.pop()
            finally:
                state.lock.release()

    # The IO loop pumps 128KiB chunks. An ideal message is a multiple of this,
    # odd-sized messages waste one tiny write() per message on the trailer.
    # Therefore subtract 10 bytes pickle overhead + 24 bytes header.
    IO_SIZE = mitogen.core.CHUNK_SIZE - (mitogen.core.Message.HEADER_LEN + (
        len(
            mitogen.core.Message.pickled(
                mitogen.core.Blob(b(' ') * mitogen.core.CHUNK_SIZE)
            ).data
        ) - mitogen.core.CHUNK_SIZE
    ))

    def _schedule_pending_unlocked(self, state):
        """
        Consider the pending transfers for a stream, pumping new chunks while
        the unacknowledged byte count is below :attr:`window_size_bytes`. Must
        be called with the FileStreamState lock held.

        :param FileStreamState state:
            Stream to schedule chunks for.
        """
        while state.jobs and state.unacked < self.window_size_bytes:
            sender, fp = state.jobs[0]
            s = fp.read(self.IO_SIZE)
            if s:
                state.unacked += len(s)
                sender.send(mitogen.core.Blob(s))
            else:
                # File is done. Cause the target's receive loop to exit by
                # closing the sender, close the file, and remove the job entry.
                sender.close()
                fp.close()
                state.jobs.pop(0)

    def _prefix_is_authorized(self, path):
        """
        Return :data:`True` if any directory prefix of `path` has been
        registered via :meth:`register_prefix`, otherwise :data:`False`.
        :func:`os.path.abspath` is used to ensure the path is absolute.

        :param str path:
            The path.
        :returns: Whether the path is covered by a registered prefix.
        """
        path = os.path.abspath(path)
        while True:
            if path in self._prefixes:
                return True
            if path == '/':
                break
            path = os.path.dirname(path)
        return False

    @expose(policy=AllowAny())
    @no_reply()
    @arg_spec({
        'path': mitogen.core.FsPathTypes,
        'sender': mitogen.core.Sender,
    })
    def fetch(self, path, sender, msg):
        """
        Start a transfer for a registered path.

        :param str path:
            File path.
        :param mitogen.core.Sender sender:
            Sender to receive file data.
        :returns:
            Dict containing the file metadata:

            * ``size``: File size in bytes.
            * ``mode``: Integer file mode.
            * ``owner``: Owner account name on host machine.
            * ``group``: Owner group name on host machine.
            * ``mtime``: Floating point modification time.
            * ``atime``: Floating point access time.
        :raises Error:
            Unregistered path, or Sender did not match requestee context.
        """
        if (
            (path not in self._paths) and
            (not self._prefix_is_authorized(path)) and
            (not mitogen.core._has_parent_authority(msg.auth_id))
        ):
            msg.reply(mitogen.core.CallError(
                Error(self.unregistered_msg % (path,))
            ))
            return

        if msg.src_id != sender.context.context_id:
            msg.reply(mitogen.core.CallError(
                Error(self.context_mismatch_msg)
            ))
            return

        LOG.debug('Serving %r', path)

        # Response must arrive first so requestee can begin receive loop,
        # otherwise first ack won't arrive until all pending chunks were
        # delivered. In that case max BDP would always be 128KiB, aka. max
        # ~10Mbit/sec over a 100ms link.
        try:
            fp = open(path, 'rb', self.IO_SIZE)
            msg.reply(self._generate_stat(path))
        except IOError:
            msg.reply(mitogen.core.CallError(
                sys.exc_info()[1]
            ))
            return

        stream = self.router.stream_by_id(sender.context.context_id)
        state = self._state_by_stream.setdefault(stream, FileStreamState())
        state.lock.acquire()
        try:
            state.jobs.append((sender, fp))
            self._schedule_pending_unlocked(state)
        finally:
            state.lock.release()

    @expose(policy=AllowAny())
    @no_reply()
    @arg_spec({
        'size': int,
    })
    def acknowledge(self, size, msg):
        """
        Acknowledge bytes received by a transfer target, scheduling new chunks
        to keep the window full. This should be called for every chunk received
        by the target.
        """
        stream = self.router.stream_by_id(msg.src_id)
        state = self._state_by_stream[stream]
        state.lock.acquire()
        try:
            if state.unacked < size:
                LOG.error('%r.acknowledge(src_id %d): unacked=%d < size %d',
                          self, msg.src_id, state.unacked, size)
            state.unacked -= min(state.unacked, size)
            self._schedule_pending_unlocked(state)
        finally:
            state.lock.release()

    @classmethod
    def get(cls, context, path, out_fp):
        """
        Streamily download a file from the connection multiplexer process in
        the controller.

        :param mitogen.core.Context context:
            Reference to the context hosting the FileService that will be used
            to fetch the file.
        :param bytes path:
            FileService registered name of the input file.
        :param file out_fp:
            Writable file-like object the received chunks are written to.
        :returns:
            Tuple of (`ok`, `metadata`), where `ok` is :data:`True` on success,
            or :data:`False` if the transfer was interrupted and the output
            should be discarded.

            `metadata` is a dictionary of file metadata as documented in
            :meth:`fetch`.
        """
        LOG.debug('get_file(): fetching %r from %r', path, context)
        t0 = mitogen.core.now()
        recv = mitogen.core.Receiver(router=context.router)
        metadata = context.call_service(
            service_name=cls.name(),
            method_name='fetch',
            path=path,
            sender=recv.to_sender(),
        )

        received_bytes = 0
        for chunk in recv:
            s = chunk.unpickle()
            LOG.debug('get_file(%r): received %d bytes', path, len(s))
            context.call_service_async(
                service_name=cls.name(),
                method_name='acknowledge',
                size=len(s),
            ).close()
            out_fp.write(s)
            received_bytes += len(s)

        ok = received_bytes == metadata['size']
        if received_bytes < metadata['size']:
            LOG.error('get_file(%r): receiver was closed early, controller '
                      'may be shutting down, or the file was truncated '
                      'during transfer. Expected %d bytes, received %d.',
                      path, metadata['size'], received_bytes)
        elif received_bytes > metadata['size']:
            LOG.error('get_file(%r): the file appears to have grown '
                      'while transfer was in progress. Expected %d '
                      'bytes, received %d.',
                      path, metadata['size'], received_bytes)

        LOG.debug('target.get_file(): fetched %d bytes of %r from %r in %dms',
                  metadata['size'], path, context,
                  1000 * (mitogen.core.now() - t0))
        return ok, metadata
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/setns.py b/deployment/lib/mitogen-0.2.9/mitogen/setns.py
new file mode 100644
index 0000000000000000000000000000000000000000..46a50301d8f5cbece89132c4cb1b8dfe04ce5e84
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/setns.py
@@ -0,0 +1,241 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import ctypes
+import grp
+import logging
+import os
+import pwd
+import subprocess
+import sys
+
+import mitogen.core
+import mitogen.parent
+
+
LOG = logging.getLogger(__name__)
# Handle on the C library of the current process; use_errno=True makes
# ctypes.get_errno() report errno after a failed call.
LIBC = ctypes.CDLL(None, use_errno=True)
# strerror(3) returns char *; declare restype so ctypes does not truncate
# the pointer to a C int.
LIBC__strerror = LIBC.strerror
LIBC__strerror.restype = ctypes.c_char_p
+
+
class Error(mitogen.core.StreamError):
    """
    Raised when any error occurs establishing a setns connection.
    """
    pass
+
+
def setns(kind, fd):
    """
    Associate the calling thread with the namespace open on `fd` via the
    setns(2) system call.

    :param str kind:
        Namespace name; used only to build the error message.
    :param int fd:
        Descriptor open on a ``/proc/<pid>/ns/<kind>`` file.
    :raises OSError:
        The system call failed.
    """
    rc = LIBC.setns(int(fd), 0)
    if rc == -1:
        errno = ctypes.get_errno()
        raise OSError(errno, 'setns(%s, %s): %s' % (
            fd, kind, LIBC__strerror(errno),
        ))
+
+
def _run_command(args):
    """
    Execute `args`, returning its combined stdout/stderr decoded as UTF-8
    (undecodable bytes replaced) when it exits with status 0.

    :param list args:
        Command argument vector.
    :raises Error:
        The command could not be executed, or exited with nonzero status.
    """
    argv = mitogen.parent.Argv(args)
    try:
        proc = subprocess.Popen(
            args=args,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
    except OSError:
        e = sys.exc_info()[1]
        raise Error('could not execute %s: %s', argv, e)

    output, _ = proc.communicate()
    if not proc.returncode:
        return output.decode('utf-8', 'replace')

    # Reuse the Argv built above rather than constructing it twice; also fix
    # the "exitted" misspelling in the user-visible error message.
    raise Error("%s exited with status %d: %s",
                argv, proc.returncode, output)
+
+
def get_docker_pid(path, name):
    """
    Return the PID of container `name`'s init process, as reported by the
    ``docker inspect`` CLI located at `path`.
    """
    output = _run_command([path, 'inspect', '--format={{.State.Pid}}', name])
    try:
        return int(output)
    except ValueError:
        raise Error("could not find PID from docker output.\n%s", output)
+
+
def get_lxc_pid(path, name):
    """
    Return the PID of container `name`'s init process by parsing the output
    of the ``lxc-info`` CLI located at `path`.
    """
    output = _run_command([path, '-n', name])
    for line in output.splitlines():
        tokens = line.split()
        if tokens[:1] == ['PID:']:
            return int(tokens[1])

    raise Error("could not find PID from lxc-info output.\n%s", output)
+
+
def get_lxd_pid(path, name):
    """
    Return the PID of container `name`'s init process by parsing the output
    of the LXD ``lxc info`` CLI located at `path`.
    """
    output = _run_command([path, 'info', name])
    for line in output.splitlines():
        tokens = line.split()
        if tokens[:1] == ['Pid:']:
            return int(tokens[1])

    raise Error("could not find PID from lxc output.\n%s", output)
+
+
def get_machinectl_pid(path, name):
    """
    Return the PID of container `name`'s leader process by parsing the
    output of the ``machinectl status`` CLI located at `path`.
    """
    output = _run_command([path, 'status', name])
    for line in output.splitlines():
        tokens = line.split()
        if tokens[:1] == ['Leader:']:
            return int(tokens[1])

    raise Error("could not find PID from machinectl output.\n%s", output)
+
+
# Maps each supported container kind to (name of the Options attribute
# holding the CLI tool path, function parsing the leader PID from that
# tool's output). Used by Options validation and Connection.connect().
GET_LEADER_BY_KIND = {
    'docker': ('docker_path', get_docker_pid),
    'lxc': ('lxc_info_path', get_lxc_pid),
    'lxd': ('lxc_path', get_lxd_pid),
    'machinectl': ('machinectl_path', get_machinectl_pid),
}
+
+
class Options(mitogen.parent.Options):
    """
    Connection options for the setns method. Class attributes provide the
    defaults; truthy constructor arguments override them.
    """
    #: Container name or ID to attach to.
    container = None
    #: Account to transition to inside the container.
    username = 'root'
    #: One of the GET_LEADER_BY_KIND keys: docker, lxc, lxd, machinectl.
    kind = None
    #: Python interpreter to start inside the container.
    python_path = 'python'
    #: Path to the docker CLI.
    docker_path = 'docker'
    #: Path to the LXD lxc CLI.
    lxc_path = 'lxc'
    #: Path to the classic LXC lxc-info CLI.
    lxc_info_path = 'lxc-info'
    #: Path to the machinectl CLI.
    machinectl_path = 'machinectl'

    def __init__(self, container, kind, username=None, docker_path=None,
                 lxc_path=None, lxc_info_path=None, machinectl_path=None,
                 **kwargs):
        """
        :raises Error:
            `kind` is not one of the supported container kinds.
        """
        super(Options, self).__init__(**kwargs)
        if kind not in GET_LEADER_BY_KIND:
            raise Error('unsupported container kind: %r', kind)

        self.container = mitogen.core.to_text(container)
        self.kind = kind
        # Falsy arguments leave the class-level defaults untouched.
        if username:
            self.username = mitogen.core.to_text(username)
        if docker_path:
            self.docker_path = docker_path
        if lxc_path:
            self.lxc_path = lxc_path
        if lxc_info_path:
            self.lxc_info_path = lxc_info_path
        if machinectl_path:
            self.machinectl_path = machinectl_path
+
+
class Connection(mitogen.parent.Connection):
    """
    Connection method that starts the child inside an existing container's
    namespaces via setns(2), after resolving the container's leader PID with
    the configured CLI tool.
    """
    options_class = Options
    child_is_immediate_subprocess = False

    # Order matters. https://github.com/karelzak/util-linux/commit/854d0fe/
    NS_ORDER = ('ipc', 'uts', 'net', 'pid', 'mnt', 'user')

    def preexec_fn(self):
        """
        Run in the child between fork and exec: join the container leader's
        namespaces, chroot into its filesystem, then switch to the configured
        user account.

        :raises Error:
            A namespace file could not be opened, or the user transition
            failed.
        """
        nspath = '/proc/%d/ns/' % (self.leader_pid,)
        selfpath = '/proc/self/ns/'
        try:
            # Only open namespaces that differ from our own; re-entering an
            # identical namespace is redundant.
            ns_fps = [
                open(nspath + name)
                for name in self.NS_ORDER
                if os.path.exists(nspath + name) and (
                    os.readlink(nspath + name) != os.readlink(selfpath + name)
                )
            ]
        except Exception:
            e = sys.exc_info()[1]
            raise Error(str(e))

        os.chdir('/proc/%s/root' % (self.leader_pid,))
        os.chroot('.')
        os.chdir('/')
        for fp in ns_fps:
            setns(fp.name, fp.fileno())
            fp.close()

        # Drop any account database state cached by the C library before the
        # chroot, so the lookups below consult the container's databases.
        for sym in 'endpwent', 'endgrent', 'endspent', 'endsgent':
            try:
                getattr(LIBC, sym)()
            except AttributeError:
                pass

        try:
            os.setgroups([grent.gr_gid
                          for grent in grp.getgrall()
                          if self.options.username in grent.gr_mem])
            pwent = pwd.getpwnam(self.options.username)
            os.setreuid(pwent.pw_uid, pwent.pw_uid)
            # shadow-4.4/libmisc/setupenv.c. Not done: MAIL, PATH
            os.environ.update({
                'HOME': pwent.pw_dir,
                'SHELL': pwent.pw_shell or '/bin/sh',
                'LOGNAME': self.options.username,
                'USER': self.options.username,
            })
            if ((os.path.exists(pwent.pw_dir) and
                 os.access(pwent.pw_dir, os.X_OK))):
                os.chdir(pwent.pw_dir)
        except Exception:
            e = sys.exc_info()[1]
            # Bug fix: username/container live on self.options. The previous
            # self.username/self.container references raised AttributeError
            # here, masking the real failure.
            raise Error(self.username_msg, self.options.username,
                        self.options.container, type(e).__name__, e)

    username_msg = 'while transitioning to user %r in container %r: %s: %s'

    def get_boot_command(self):
        """
        Wrap the parent's boot command in ``/bin/sh`` so the child is a fork
        of the caller, avoiding CLONE_THREAD failures after setns().
        """
        # With setns(CLONE_NEWPID), new children of the caller receive a new
        # PID namespace, however the caller's namespace won't change. That
        # causes subsequent calls to clone() specifying CLONE_THREAD to fail
        # with EINVAL, as threads in the same process can't have varying PID
        # namespaces, meaning starting new threads in the exec'd program will
        # fail. The solution is forking, so inject a /bin/sh call to achieve
        # this.
        argv = super(Connection, self).get_boot_command()
        # bash will exec() if a single command was specified and the shell has
        # nothing left to do, so "; exit $?" gives bash a reason to live.
        return ['/bin/sh', '-c', '%s; exit $?' % (mitogen.parent.Argv(argv),)]

    def create_child(self, args):
        """Create the child with preexec_fn() installed to enter the container."""
        return mitogen.parent.create_child(args, preexec_fn=self.preexec_fn)

    def _get_name(self):
        """Connection name used in logs and context naming."""
        return u'setns.' + self.options.container

    def connect(self, **kwargs):
        """
        Resolve the container leader PID using the CLI tool for the configured
        kind, then proceed with the normal parent connection sequence.
        """
        attr, func = GET_LEADER_BY_KIND[self.options.kind]
        tool_path = getattr(self.options, attr)
        self.leader_pid = func(tool_path, self.options.container)
        LOG.debug('Leader PID for %s container %r: %d',
                  self.options.kind, self.options.container, self.leader_pid)
        return super(Connection, self).connect(**kwargs)
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/ssh.py b/deployment/lib/mitogen-0.2.9/mitogen/ssh.py
new file mode 100644
index 0000000000000000000000000000000000000000..b276dd28e46b94de6c74ceb65ac056d6e3914604
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/ssh.py
@@ -0,0 +1,294 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Construct new children via the OpenSSH client.
+"""
+
+import logging
+import re
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    from pipes import quote as shlex_quote
+
+import mitogen.parent
+from mitogen.core import b
+
+try:
+    any
+except NameError:
+    from mitogen.core import any
+
+
+LOG = logging.getLogger(__name__)
+
+auth_incorrect_msg = 'SSH authentication is incorrect'
+password_incorrect_msg = 'SSH password is incorrect'
+password_required_msg = 'SSH password was requested, but none specified'
+hostkey_config_msg = (
+    'SSH requested permission to accept unknown host key, but '
+    'check_host_keys=ignore. This is likely due to ssh_args=  '
+    'conflicting with check_host_keys=. Please correct your '
+    'configuration.'
+)
+hostkey_failed_msg = (
+    'Host key checking is enabled, and SSH reported an unrecognized or '
+    'mismatching host key.'
+)
+
+# sshpass uses 'assword' because it doesn't lowercase the input.
+PASSWORD_PROMPT_PATTERN = re.compile(
+    b('password'),
+    re.I
+)
+
+HOSTKEY_REQ_PATTERN = re.compile(
+    b(r'are you sure you want to continue connecting \(yes/no\)\?'),
+    re.I
+)
+
+HOSTKEY_FAIL_PATTERN = re.compile(
+    b(r'host key verification failed\.'),
+    re.I
+)
+
+# [user@host: ] permission denied
+# issue #271: work around conflict with user shell reporting 'permission
+# denied' e.g. during chdir($HOME) by only matching it at the start of the
+# line.
+PERMDENIED_PATTERN = re.compile(
+    b('^(?:[^@]+@[^:]+: )?'  # Absent in OpenSSH <7.5
+      'Permission denied'),
+    re.I
+)
+
+DEBUG_PATTERN = re.compile(b('^debug[123]:'))
+
+
+class PasswordError(mitogen.core.StreamError):
+    pass
+
+
+class HostKeyError(mitogen.core.StreamError):
+    pass
+
+
+class SetupProtocol(mitogen.parent.RegexProtocol):
+    """
+    This protocol is attached to stderr of the SSH client. It responds to
+    various interactive prompts as required.
+    """
+    password_sent = False
+
+    def _on_host_key_request(self, line, match):
+        if self.stream.conn.options.check_host_keys == 'accept':
+            LOG.debug('%s: accepting host key', self.stream.name)
+            self.stream.transmit_side.write(b('yes\n'))
+            return
+
+        # _host_key_prompt() should never be reached with ignore or enforce
+        # mode, SSH should have handled that. User's ssh_args= is conflicting
+        # with ours.
+        self.stream.conn._fail_connection(HostKeyError(hostkey_config_msg))
+
+    def _on_host_key_failed(self, line, match):
+        self.stream.conn._fail_connection(HostKeyError(hostkey_failed_msg))
+
+    def _on_permission_denied(self, line, match):
+        if self.stream.conn.options.password is not None and \
+                self.password_sent:
+            self.stream.conn._fail_connection(
+                PasswordError(password_incorrect_msg)
+            )
+        elif PASSWORD_PROMPT_PATTERN.search(line) and \
+                self.stream.conn.options.password is None:
+            # Permission denied (password,pubkey)
+            self.stream.conn._fail_connection(
+                PasswordError(password_required_msg)
+            )
+        else:
+            self.stream.conn._fail_connection(
+                PasswordError(auth_incorrect_msg)
+            )
+
+    def _on_password_prompt(self, line, match):
+        LOG.debug('%s: (password prompt): %s', self.stream.name, line)
+        if self.stream.conn.options.password is None:
+            self.stream.conn._fail(PasswordError(password_required_msg))
+
+        self.stream.transmit_side.write(
+            (self.stream.conn.options.password + '\n').encode('utf-8')
+        )
+        self.password_sent = True
+
+    def _on_debug_line(self, line, match):
+        text = mitogen.core.to_text(line.rstrip())
+        LOG.debug('%s: %s', self.stream.name, text)
+
+    PATTERNS = [
+        (DEBUG_PATTERN, _on_debug_line),
+        (HOSTKEY_FAIL_PATTERN, _on_host_key_failed),
+        (PERMDENIED_PATTERN, _on_permission_denied),
+    ]
+
+    PARTIAL_PATTERNS = [
+        (PASSWORD_PROMPT_PATTERN, _on_password_prompt),
+        (HOSTKEY_REQ_PATTERN, _on_host_key_request),
+    ]
+
+
+class Options(mitogen.parent.Options):
+    #: Default to whatever is available as 'python' on the remote machine,
+    #: overriding sys.executable use.
+    python_path = 'python'
+
+    #: Number of -v invocations to pass on command line.
+    ssh_debug_level = 0
+
+    #: The path to the SSH binary.
+    ssh_path = 'ssh'
+
+    hostname = None
+    username = None
+    port = None
+    identity_file = None
+    password = None
+    ssh_args = None
+
+    check_host_keys_msg = 'check_host_keys= must be set to accept, enforce or ignore'
+
+    def __init__(self, hostname, username=None, ssh_path=None, port=None,
+                 check_host_keys='enforce', password=None, identity_file=None,
+                 compression=True, ssh_args=None, keepalive_enabled=True,
+                 keepalive_count=3, keepalive_interval=15,
+                 identities_only=True, ssh_debug_level=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+
+        if check_host_keys not in ('accept', 'enforce', 'ignore'):
+            raise ValueError(self.check_host_keys_msg)
+
+        self.hostname = hostname
+        self.username = username
+        self.port = port
+        self.check_host_keys = check_host_keys
+        self.password = password
+        self.identity_file = identity_file
+        self.identities_only = identities_only
+        self.compression = compression
+        self.keepalive_enabled = keepalive_enabled
+        self.keepalive_count = keepalive_count
+        self.keepalive_interval = keepalive_interval
+        if ssh_path:
+            self.ssh_path = ssh_path
+        if ssh_args:
+            self.ssh_args = ssh_args
+        if ssh_debug_level:
+            self.ssh_debug_level = ssh_debug_level
+
+
+class Connection(mitogen.parent.Connection):
+    options_class = Options
+    diag_protocol_class = SetupProtocol
+
+    child_is_immediate_subprocess = False
+
+    def _get_name(self):
+        s = u'ssh.' + mitogen.core.to_text(self.options.hostname)
+        if self.options.port and self.options.port != 22:
+            s += u':%s' % (self.options.port,)
+        return s
+
+    def _requires_pty(self):
+        """
+        Return :data:`True` if a PTY to is required for this configuration,
+        because it must interactively accept host keys or type a password.
+        """
+        return (
+            self.options.check_host_keys == 'accept' or
+            self.options.password is not None
+        )
+
+    def create_child(self, **kwargs):
+        """
+        Avoid PTY use when possible to avoid a scaling limitation.
+        """
+        if self._requires_pty():
+            return mitogen.parent.hybrid_tty_create_child(**kwargs)
+        else:
+            return mitogen.parent.create_child(stderr_pipe=True, **kwargs)
+
+    def get_boot_command(self):
+        bits = [self.options.ssh_path]
+        if self.options.ssh_debug_level:
+            bits += ['-' + ('v' * min(3, self.options.ssh_debug_level))]
+        else:
+            # issue #307: suppress any login banner, as it may contain the
+            # password prompt, and there is no robust way to tell the
+            # difference.
+            bits += ['-o', 'LogLevel ERROR']
+        if self.options.username:
+            bits += ['-l', self.options.username]
+        if self.options.port is not None:
+            bits += ['-p', str(self.options.port)]
+        if self.options.identities_only and (self.options.identity_file or
+                                             self.options.password):
+            bits += ['-o', 'IdentitiesOnly yes']
+        if self.options.identity_file:
+            bits += ['-i', self.options.identity_file]
+        if self.options.compression:
+            bits += ['-o', 'Compression yes']
+        if self.options.keepalive_enabled:
+            bits += [
+                '-o', 'ServerAliveInterval %s' % (
+                    self.options.keepalive_interval,
+                ),
+                '-o', 'ServerAliveCountMax %s' % (
+                    self.options.keepalive_count,
+                ),
+            ]
+        if not self._requires_pty():
+            bits += ['-o', 'BatchMode yes']
+        if self.options.check_host_keys == 'enforce':
+            bits += ['-o', 'StrictHostKeyChecking yes']
+        if self.options.check_host_keys == 'accept':
+            bits += ['-o', 'StrictHostKeyChecking ask']
+        elif self.options.check_host_keys == 'ignore':
+            bits += [
+                '-o', 'StrictHostKeyChecking no',
+                '-o', 'UserKnownHostsFile /dev/null',
+                '-o', 'GlobalKnownHostsFile /dev/null',
+            ]
+        if self.options.ssh_args:
+            bits += self.options.ssh_args
+        bits.append(self.options.hostname)
+        base = super(Connection, self).get_boot_command()
+        return bits + [shlex_quote(s).strip() for s in base]
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/su.py b/deployment/lib/mitogen-0.2.9/mitogen/su.py
new file mode 100644
index 0000000000000000000000000000000000000000..080c978293e701c81e174ecde61088a6962eebbc
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/su.py
@@ -0,0 +1,160 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+import re
+
+import mitogen.core
+import mitogen.parent
+
+try:
+    any
+except NameError:
+    from mitogen.core import any
+
+
+LOG = logging.getLogger(__name__)
+
+password_incorrect_msg = 'su password is incorrect'
+password_required_msg = 'su password is required'
+
+
+class PasswordError(mitogen.core.StreamError):
+    pass
+
+
+class SetupBootstrapProtocol(mitogen.parent.BootstrapProtocol):
+    password_sent = False
+
+    def setup_patterns(self, conn):
+        """
+        su options cause the regexes used to vary. This is a mess, requires
+        reworking.
+        """
+        incorrect_pattern = re.compile(
+            mitogen.core.b('|').join(
+                re.escape(s.encode('utf-8'))
+                for s in conn.options.incorrect_prompts
+            ),
+            re.I
+        )
+        prompt_pattern = re.compile(
+            re.escape(
+                conn.options.password_prompt.encode('utf-8')
+            ),
+            re.I
+        )
+
+        self.PATTERNS = mitogen.parent.BootstrapProtocol.PATTERNS + [
+            (incorrect_pattern, type(self)._on_password_incorrect),
+        ]
+        self.PARTIAL_PATTERNS = mitogen.parent.BootstrapProtocol.PARTIAL_PATTERNS + [
+            (prompt_pattern, type(self)._on_password_prompt),
+        ]
+
+    def _on_password_prompt(self, line, match):
+        LOG.debug('%r: (password prompt): %r',
+                  self.stream.name, line.decode('utf-8', 'replace'))
+
+        if self.stream.conn.options.password is None:
+            self.stream.conn._fail_connection(
+                PasswordError(password_required_msg)
+            )
+            return
+
+        if self.password_sent:
+            self.stream.conn._fail_connection(
+                PasswordError(password_incorrect_msg)
+            )
+            return
+
+        self.stream.transmit_side.write(
+            (self.stream.conn.options.password + '\n').encode('utf-8')
+        )
+        self.password_sent = True
+
+    def _on_password_incorrect(self, line, match):
+        self.stream.conn._fail_connection(
+            PasswordError(password_incorrect_msg)
+        )
+
+
+class Options(mitogen.parent.Options):
+    username = u'root'
+    password = None
+    su_path = 'su'
+    password_prompt = u'password:'
+    incorrect_prompts = (
+        u'su: sorry',                   # BSD
+        u'su: authentication failure',  # Linux
+        u'su: incorrect password',      # CentOS 6
+        u'authentication is denied',    # AIX
+    )
+
+    def __init__(self, username=None, password=None, su_path=None,
+                 password_prompt=None, incorrect_prompts=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+        if username is not None:
+            self.username = mitogen.core.to_text(username)
+        if password is not None:
+            self.password = mitogen.core.to_text(password)
+        if su_path is not None:
+            self.su_path = su_path
+        if password_prompt is not None:
+            self.password_prompt = password_prompt
+        if incorrect_prompts is not None:
+            self.incorrect_prompts = [
+                mitogen.core.to_text(p)
+                for p in incorrect_prompts
+            ]
+
+
+class Connection(mitogen.parent.Connection):
+    options_class = Options
+    stream_protocol_class = SetupBootstrapProtocol
+
+    # TODO: BSD su cannot handle stdin being a socketpair, but it does let the
+    # child inherit fds from the parent. So we can still pass a socketpair in
+    # for hybrid_tty_create_child(), there just needs to be either a shell
+    # snippet or bootstrap support for fixing things up afterwards.
+    create_child = staticmethod(mitogen.parent.tty_create_child)
+    child_is_immediate_subprocess = False
+
+    def _get_name(self):
+        return u'su.' + self.options.username
+
+    def stream_factory(self):
+        stream = super(Connection, self).stream_factory()
+        stream.protocol.setup_patterns(self)
+        return stream
+
+    def get_boot_command(self):
+        argv = mitogen.parent.Argv(super(Connection, self).get_boot_command())
+        return [self.options.su_path, self.options.username, '-c', str(argv)]
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/sudo.py b/deployment/lib/mitogen-0.2.9/mitogen/sudo.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea07d0c1926a1a19908973246e523342049c5fa3
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/sudo.py
@@ -0,0 +1,271 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import base64
+import logging
+import optparse
+import re
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+password_incorrect_msg = 'sudo password is incorrect'
+password_required_msg = 'sudo password is required'
+
+# These are base64-encoded UTF-8 as our existing minifier/module server
+# struggles with Unicode Python source in some (forgotten) circumstances.
+PASSWORD_PROMPTS = [
+    'cGFzc3dvcmQ=',                                              # english
+    'bG96aW5rYQ==',                                              # sr@latin.po
+    '44OR44K544Ov44O844OJ',                                      # ja.po
+    '4Kaq4Ka+4Ka44KaT4Kef4Ka+4Kaw4KeN4Kah',                      # bn.po
+    '2YPZhNmF2Kkg2KfZhNiz2LE=',                                  # ar.po
+    'cGFzYWhpdHph',                                              # eu.po
+    '0L/QsNGA0L7Qu9GM',                                          # uk.po
+    'cGFyb29s',                                                  # et.po
+    'c2FsYXNhbmE=',                                              # fi.po
+    '4Kiq4Ki+4Ki44Ki14Kiw4Kih',                                  # pa.po
+    'Y29udHJhc2lnbm8=',                                          # ia.po
+    'Zm9jYWwgZmFpcmU=',                                          # ga.po
+    '16HXodee15Q=',                                              # he.po
+    '4Kqq4Kq+4Kq44Kq14Kqw4KuN4Kqh',                              # gu.po
+    '0L/QsNGA0L7Qu9Cw',                                          # bg.po
+    '4Kyq4K2N4Kyw4Kys4K2H4Ky2IOCsuOCsmeCtjeCsleCth+CspA==',      # or.po
+    '4K6V4K6f4K614K+B4K6a4K+N4K6a4K+K4K6y4K+N',                  # ta.po
+    'cGFzc3dvcnQ=',                                              # de.po
+    '7JWU7Zi4',                                                  # ko.po
+    '0LvQvtC30LjQvdC60LA=',                                      # sr.po
+    'beG6rXQga2jhuql1',                                          # vi.po
+    'c2VuaGE=',                                                  # pt_BR.po
+    'cGFzc3dvcmQ=',                                              # it.po
+    'aGVzbG8=',                                                  # cs.po
+    '5a+G56K877ya',                                              # zh_TW.po
+    'aGVzbG8=',                                                  # sk.po
+    '4LC44LCC4LCV4LGH4LCk4LCq4LCm4LCu4LGB',                      # te.po
+    '0L/QsNGA0L7Qu9GM',                                          # kk.po
+    'aGFzxYJv',                                                  # pl.po
+    'Y29udHJhc2VueWE=',                                          # ca.po
+    'Y29udHJhc2XDsWE=',                                          # es.po
+    '4LSF4LSf4LSv4LS+4LSz4LS14LS+4LSV4LWN4LSV4LWN',              # ml.po
+    'c2VuaGE=',                                                  # pt.po
+    '5a+G56CB77ya',                                              # zh_CN.po
+    '4KSX4KWB4KSq4KWN4KSk4KS24KSs4KWN4KSm',                      # mr.po
+    'bMO2c2Vub3Jk',                                              # sv.po
+    '4YOe4YOQ4YOg4YOd4YOa4YOY',                                  # ka.po
+    '4KS24KSs4KWN4KSm4KSV4KWC4KSf',                              # hi.po
+    'YWRnYW5nc2tvZGU=',                                          # da.po
+    '4La74LeE4LeD4LeK4La04Lav4La6',                              # si.po
+    'cGFzc29yZA==',                                              # nb.po
+    'd2FjaHR3b29yZA==',                                          # nl.po
+    '4Kaq4Ka+4Ka44KaT4Kef4Ka+4Kaw4KeN4Kah',                      # bn_IN.po
+    'cGFyb2xh',                                                  # tr.po
+    '4LKX4LOB4LKq4LON4LKk4LKq4LKm',                              # kn.po
+    'c2FuZGk=',                                                  # id.po
+    '0L/QsNGA0L7Qu9GM',                                          # ru.po
+    'amVsc3rDsw==',                                              # hu.po
+    'bW90IGRlIHBhc3Nl',                                          # fr.po
+    'aXBoYXNpd2VkaQ==',                                          # zu.po
+    '4Z6W4Z624Z6A4Z+S4Z6Z4Z6f4Z6Y4Z+S4Z6E4Z624Z6P4Z+LwqDhn5Y=',  # km.po
+    '4KaX4KeB4Kaq4KeN4Kak4Ka24Kas4KeN4Kam',                      # as.po
+]
+
+
+PASSWORD_PROMPT_RE = re.compile(
+    mitogen.core.b('|').join(
+        base64.b64decode(s)
+        for s in PASSWORD_PROMPTS
+    ),
+    re.I
+)
+
+SUDO_OPTIONS = [
+    #(False, 'bool', '--askpass', '-A')
+    #(False, 'str', '--auth-type', '-a')
+    #(False, 'bool', '--background', '-b')
+    #(False, 'str', '--close-from', '-C')
+    #(False, 'str', '--login-class', 'c')
+    (True,  'bool', '--preserve-env', '-E'),
+    #(False, 'bool', '--edit', '-e')
+    #(False, 'str', '--group', '-g')
+    (True,  'bool', '--set-home', '-H'),
+    #(False, 'str', '--host', '-h')
+    (False, 'bool', '--login', '-i'),
+    #(False, 'bool', '--remove-timestamp', '-K')
+    #(False, 'bool', '--reset-timestamp', '-k')
+    #(False, 'bool', '--list', '-l')
+    #(False, 'bool', '--preserve-groups', '-P')
+    #(False, 'str', '--prompt', '-p')
+
+    # SELinux options. Passed through as-is.
+    (False, 'str', '--role', '-r'),
+    (False, 'str', '--type', '-t'),
+
+    # These options are supplied by default by Ansible, but are ignored, as
+    # sudo always runs under a TTY with Mitogen.
+    (True, 'bool', '--stdin', '-S'),
+    (True, 'bool', '--non-interactive', '-n'),
+
+    #(False, 'str', '--shell', '-s')
+    #(False, 'str', '--other-user', '-U')
+    (False, 'str', '--user', '-u'),
+    #(False, 'bool', '--version', '-V')
+    #(False, 'bool', '--validate', '-v')
+]
+
+
+class OptionParser(optparse.OptionParser):
+    def help(self):
+        self.exit()
+    def error(self, msg):
+        self.exit(msg=msg)
+    def exit(self, status=0, msg=None):
+        msg = 'sudo: ' + (msg or 'unsupported option')
+        raise mitogen.core.StreamError(msg)
+
+
+def make_sudo_parser():
+    parser = OptionParser()
+    for supported, kind, longopt, shortopt in SUDO_OPTIONS:
+        if kind == 'bool':
+            parser.add_option(longopt, shortopt, action='store_true')
+        else:
+            parser.add_option(longopt, shortopt)
+    return parser
+
+
+def parse_sudo_flags(args):
+    parser = make_sudo_parser()
+    opts, args = parser.parse_args(args)
+    if len(args):
+        raise mitogen.core.StreamError('unsupported sudo arguments:'+str(args))
+    return opts
+
+
+class PasswordError(mitogen.core.StreamError):
+    pass
+
+
+def option(default, *args):
+    for arg in args:
+        if arg is not None:
+            return arg
+    return default
+
+
+class Options(mitogen.parent.Options):
+    sudo_path = 'sudo'
+    username = 'root'
+    password = None
+    preserve_env = False
+    set_home = False
+    login = False
+
+    selinux_role = None
+    selinux_type = None
+
+    def __init__(self, username=None, sudo_path=None, password=None,
+                 preserve_env=None, set_home=None, sudo_args=None,
+                 login=None, selinux_role=None, selinux_type=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+        opts = parse_sudo_flags(sudo_args or [])
+
+        self.username = option(self.username, username, opts.user)
+        self.sudo_path = option(self.sudo_path, sudo_path)
+        if password:
+            self.password = mitogen.core.to_text(password)
+        self.preserve_env = option(self.preserve_env,
+            preserve_env, opts.preserve_env)
+        self.set_home = option(self.set_home, set_home, opts.set_home)
+        self.login = option(self.login, login, opts.login)
+        self.selinux_role = option(self.selinux_role, selinux_role, opts.role)
+        self.selinux_type = option(self.selinux_type, selinux_type, opts.type)
+
+
+class SetupProtocol(mitogen.parent.RegexProtocol):
+    password_sent = False
+
+    def _on_password_prompt(self, line, match):
+        LOG.debug('%s: (password prompt): %s',
+            self.stream.name, line.decode('utf-8', 'replace'))
+
+        if self.stream.conn.options.password is None:
+            self.stream.conn._fail_connection(
+                PasswordError(password_required_msg)
+            )
+            return
+
+        if self.password_sent:
+            self.stream.conn._fail_connection(
+                PasswordError(password_incorrect_msg)
+            )
+            return
+
+        self.stream.transmit_side.write(
+            (self.stream.conn.options.password + '\n').encode('utf-8')
+        )
+        self.password_sent = True
+
+    PARTIAL_PATTERNS = [
+        (PASSWORD_PROMPT_RE, _on_password_prompt),
+    ]
+
+
+class Connection(mitogen.parent.Connection):
+    diag_protocol_class = SetupProtocol
+    options_class = Options
+    create_child = staticmethod(mitogen.parent.hybrid_tty_create_child)
+    create_child_args = {
+        'escalates_privilege': True,
+    }
+    child_is_immediate_subprocess = False
+
+    def _get_name(self):
+        return u'sudo.' + mitogen.core.to_text(self.options.username)
+
+    def get_boot_command(self):
+        # Note: sudo did not introduce long-format option processing until July
+        # 2013, so even though we parse long-format options, supply short-form
+        # to the sudo command.
+        bits = [self.options.sudo_path, '-u', self.options.username]
+        if self.options.preserve_env:
+            bits += ['-E']
+        if self.options.set_home:
+            bits += ['-H']
+        if self.options.login:
+            bits += ['-i']
+        if self.options.selinux_role:
+            bits += ['-r', self.options.selinux_role]
+        if self.options.selinux_type:
+            bits += ['-t', self.options.selinux_type]
+
+        return bits + ['--'] + super(Connection, self).get_boot_command()
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/unix.py b/deployment/lib/mitogen-0.2.9/mitogen/unix.py
new file mode 100644
index 0000000000000000000000000000000000000000..1af1c0ec6b66522ccdaa603778a48f45502f81cc
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/unix.py
@@ -0,0 +1,226 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Permit connection of additional contexts that may act with the authority of
+this context. For now, the UNIX socket is always mode 0600, i.e. can only be
+accessed by root or the same UID. Therefore we can always trust connections to
+have the same privilege (auth_id) as the current process.
+"""
+
+import errno
+import logging
+import os
+import socket
+import struct
+import sys
+import tempfile
+
+import mitogen.core
+import mitogen.master
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Error(mitogen.core.Error):
+    """
+    Base for errors raised by :mod:`mitogen.unix`.
+    """
+    pass
+
+
+class ConnectError(Error):
+    """
+    Raised when :func:`mitogen.unix.connect` fails to connect to the listening
+    socket.
+    """
+    #: UNIX error number reported by underlying exception.
+    errno = None
+
+
+def is_path_dead(path):
+    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    try:
+        try:
+            s.connect(path)
+        except socket.error:
+            e = sys.exc_info()[1]
+            return e.args[0] in (errno.ECONNREFUSED, errno.ENOENT)
+    finally:
+        s.close()
+    return False
+
+
+def make_socket_path():
+    return tempfile.mktemp(prefix='mitogen_unix_', suffix='.sock')
+
+
+class ListenerStream(mitogen.core.Stream):
+    def on_receive(self, broker):
+        sock, _ = self.receive_side.fp.accept()
+        try:
+            self.protocol.on_accept_client(sock)
+        except:
+            sock.close()
+            raise
+
+
+class Listener(mitogen.core.Protocol):
+    stream_class = ListenerStream
+    keep_alive = True
+
+    @classmethod
+    def build_stream(cls, router, path=None, backlog=100):
+        if not path:
+            path = make_socket_path()
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        if os.path.exists(path) and is_path_dead(path):
+            LOG.debug('%r: deleting stale %r', cls.__name__, path)
+            os.unlink(path)
+
+        sock.bind(path)
+        os.chmod(path, int('0600', 8))
+        sock.listen(backlog)
+
+        stream = super(Listener, cls).build_stream(router, path)
+        stream.accept(sock, sock)
+        router.broker.start_receive(stream)
+        return stream
+
+    def __repr__(self):
+        return '%s.%s(%r)' % (
+            __name__,
+            self.__class__.__name__,
+            self.path,
+        )
+
+    def __init__(self, router, path):
+        self._router = router
+        self.path = path
+
+    def _unlink_socket(self):
+        try:
+            os.unlink(self.path)
+        except OSError:
+            e = sys.exc_info()[1]
+            # Prevent a shutdown race with the parent process.
+            if e.args[0] != errno.ENOENT:
+                raise
+
+    def on_shutdown(self, broker):
+        broker.stop_receive(self.stream)
+        self._unlink_socket()
+        self.stream.receive_side.close()
+
+    def on_accept_client(self, sock):
+        sock.setblocking(True)
+        try:
+            pid, = struct.unpack('>L', sock.recv(4))
+        except (struct.error, socket.error):
+            LOG.error('listener: failed to read remote identity: %s',
+                      sys.exc_info()[1])
+            return
+
+        context_id = self._router.id_allocator.allocate()
+        try:
+            sock.send(struct.pack('>LLL', context_id, mitogen.context_id,
+                                  os.getpid()))
+        except socket.error:
+            LOG.error('listener: failed to assign identity to PID %d: %s',
+                      pid, sys.exc_info()[1])
+            return
+
+        context = mitogen.parent.Context(self._router, context_id)
+        stream = mitogen.core.MitogenProtocol.build_stream(
+            router=self._router,
+            remote_id=context_id,
+            auth_id=mitogen.context_id,
+        )
+        stream.name = u'unix_client.%d' % (pid,)
+        stream.accept(sock, sock)
+        LOG.debug('listener: accepted connection from PID %d: %s',
+                  pid, stream.name)
+        self._router.register(context, stream)
+
+
+def _connect(path, broker, sock):
+    try:
+        # ENOENT, ECONNREFUSED
+        sock.connect(path)
+
+        # ECONNRESET
+        sock.send(struct.pack('>L', os.getpid()))
+        mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12))
+    except socket.error:
+        e = sys.exc_info()[1]
+        ce = ConnectError('could not connect to %s: %s', path, e.args[1])
+        ce.errno = e.args[0]
+        raise ce
+
+    mitogen.parent_id = remote_id
+    mitogen.parent_ids = [remote_id]
+
+    LOG.debug('client: local ID is %r, remote is %r',
+              mitogen.context_id, remote_id)
+
+    router = mitogen.master.Router(broker=broker)
+    stream = mitogen.core.MitogenProtocol.build_stream(router, remote_id)
+    stream.accept(sock, sock)
+    stream.name = u'unix_listener.%d' % (pid,)
+
+    mitogen.core.listen(stream, 'disconnect', _cleanup)
+    mitogen.core.listen(router.broker, 'shutdown',
+        lambda: router.disconnect_stream(stream))
+
+    context = mitogen.parent.Context(router, remote_id)
+    router.register(context, stream)
+    return router, context
+
+
+def connect(path, broker=None):
+    LOG.debug('client: connecting to %s', path)
+    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    try:
+        return _connect(path, broker, sock)
+    except:
+        sock.close()
+        raise
+
+
+def _cleanup():
+    """
+    Reset mitogen.context_id and friends when our connection to the parent is
+    lost. Per comments on #91, these globals need to move to the Router so
+    fix-ups like this become unnecessary.
+    """
+    mitogen.context_id = 0
+    mitogen.parent_id = None
+    mitogen.parent_ids = []
diff --git a/deployment/lib/mitogen-0.2.9/mitogen/utils.py b/deployment/lib/mitogen-0.2.9/mitogen/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1347d022fdeb1717b9bc880b4e8f474d14b1855
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/mitogen/utils.py
@@ -0,0 +1,226 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import datetime
+import logging
+import os
+import sys
+
+import mitogen
+import mitogen.core
+import mitogen.master
+import mitogen.parent
+
+
+# Python 2/3 compatibility shim: dict.iteritems() was removed in Python 3,
+# where dict.items() returns a view instead of a list.
+if mitogen.core.PY3:
+    iteritems = dict.items
+else:
+    iteritems = dict.iteritems
+
+
+def setup_gil():
+    """
+    Set extremely long GIL release interval to let threads naturally progress
+    through CPU-heavy sequences without forcing the wake of another thread that
+    may contend trying to run the same CPU-heavy code. For the new-style
+    Ansible work, this drops runtime ~33% and involuntary context switches by
+    >80%, essentially making threads cooperatively scheduled.
+    """
+    try:
+        # Python 2.
+        sys.setcheckinterval(100000)
+    except AttributeError:
+        pass
+
+    try:
+        # Python 3.
+        sys.setswitchinterval(10)
+    except AttributeError:
+        pass
+
+
+def disable_site_packages():
+    """
+    Remove all entries mentioning ``site-packages`` or ``Extras`` from
+    :attr:sys.path. Used primarily for testing on OS X within a virtualenv,
+    where OS X bundles some ancient version of the :mod:`six` module.
+    """
+    for entry in sys.path[:]:
+        if 'site-packages' in entry or 'Extras' in entry:
+            sys.path.remove(entry)
+
+
+def _formatTime(record, datefmt=None):
+    dt = datetime.datetime.fromtimestamp(record.created)
+    return dt.strftime(datefmt)
+
+
+def log_get_formatter():
+    datefmt = '%H:%M:%S'
+    if sys.version_info > (2, 6):
+        datefmt += '.%f'
+    fmt = '%(asctime)s %(levelname).1s %(name)s: %(message)s'
+    formatter = logging.Formatter(fmt, datefmt)
+    formatter.formatTime = _formatTime
+    return formatter
+
+
+def log_to_file(path=None, io=False, level='INFO'):
+    """
+    Install a new :class:`logging.Handler` writing applications logs to the
+    filesystem. Useful when debugging slave IO problems.
+
+    Parameters to this function may be overridden at runtime using environment
+    variables. See :ref:`logging-env-vars`.
+
+    :param str path:
+        If not :data:`None`, a filesystem path to write logs to. Otherwise,
+        logs are written to :data:`sys.stderr`.
+
+    :param bool io:
+        If :data:`True`, include extremely verbose IO logs in the output.
+        Useful for debugging hangs, less useful for debugging application code.
+
+    :param str level:
+        Name of the :mod:`logging` package constant that is the minimum level
+        to log at. Useful levels are ``DEBUG``, ``INFO``, ``WARNING``, and
+        ``ERROR``.
+    """
+    log = logging.getLogger('')
+    if path:
+        fp = open(path, 'w', 1)
+        mitogen.core.set_cloexec(fp.fileno())
+    else:
+        fp = sys.stderr
+
+    level = os.environ.get('MITOGEN_LOG_LEVEL', level).upper()
+    io = level == 'IO'
+    if io:
+        level = 'DEBUG'
+        logging.getLogger('mitogen.io').setLevel(level)
+
+    level = getattr(logging, level, logging.INFO)
+    log.setLevel(level)
+
+    # Prevent accidental duplicate log_to_file() calls from generating
+    # duplicate output.
+    for handler_ in reversed(log.handlers):
+        if getattr(handler_, 'is_mitogen', None):
+            log.handlers.remove(handler_)
+
+    handler = logging.StreamHandler(fp)
+    handler.is_mitogen = True
+    handler.formatter = log_get_formatter()
+    log.handlers.insert(0, handler)
+
+
+def run_with_router(func, *args, **kwargs):
+    """
+    Arrange for `func(router, *args, **kwargs)` to run with a temporary
+    :class:`mitogen.master.Router`, ensuring the Router and Broker are
+    correctly shut down during normal or exceptional return.
+
+    :returns:
+        `func`'s return value.
+    """
+    broker = mitogen.master.Broker()
+    router = mitogen.master.Router(broker)
+    try:
+        return func(router, *args, **kwargs)
+    finally:
+        broker.shutdown()
+        broker.join()
+
+
+def with_router(func):
+    """
+    Decorator version of :func:`run_with_router`. Example:
+
+    .. code-block:: python
+
+        @with_router
+        def do_stuff(router, arg):
+            pass
+
+        do_stuff(blah, 123)
+    """
+    def wrapper(*args, **kwargs):
+        return run_with_router(func, *args, **kwargs)
+    if mitogen.core.PY3:
+        wrapper.func_name = func.__name__
+    else:
+        wrapper.func_name = func.func_name
+    return wrapper
+
+
+PASSTHROUGH = (
+    int, float, bool,
+    type(None),
+    mitogen.core.Context,
+    mitogen.core.CallError,
+    mitogen.core.Blob,
+    mitogen.core.Secret,
+)
+
+
+def cast(obj):
+    """
+    Many tools love to subclass built-in types in order to implement useful
+    functionality, such as annotating the safety of a Unicode string, or adding
+    additional methods to a dict. However, cPickle loves to preserve those
+    subtypes during serialization, resulting in CallError during :meth:`call
+    <mitogen.parent.Context.call>` in the target when it tries to deserialize
+    the data.
+
+    This function walks the object graph `obj`, producing a copy with any
+    custom sub-types removed. The functionality is not default since the
+    resulting walk may be computationally expensive given a large enough graph.
+
+    See :ref:`serialization-rules` for a list of supported types.
+
+    :param obj:
+        Object to undecorate.
+    :returns:
+        Undecorated object.
+    """
+    if isinstance(obj, dict):
+        return dict((cast(k), cast(v)) for k, v in iteritems(obj))
+    if isinstance(obj, (list, tuple)):
+        return [cast(v) for v in obj]
+    if isinstance(obj, PASSTHROUGH):
+        return obj
+    if isinstance(obj, mitogen.core.UnicodeType):
+        return mitogen.core.UnicodeType(obj)
+    if isinstance(obj, mitogen.core.BytesType):
+        return mitogen.core.BytesType(obj)
+
+    raise TypeError("Cannot serialize: %r: %r" % (type(obj), obj))
diff --git a/deployment/lib/mitogen-0.2.9/setup.cfg b/deployment/lib/mitogen-0.2.9/setup.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..98ceb29d7537ab41ad7a9fdde4c8bb79bad58036
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/setup.cfg
@@ -0,0 +1,15 @@
+[coverage:run]
+branch = true
+source = 
+	mitogen
+omit = 
+	mitogen/compat/*
+
+[flake8]
+ignore = E402,E128,W503,E731
+exclude = mitogen/compat
+
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/deployment/lib/mitogen-0.2.9/setup.py b/deployment/lib/mitogen-0.2.9/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..c325799687984da4a504594c35aafab303e84476
--- /dev/null
+++ b/deployment/lib/mitogen-0.2.9/setup.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python2
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from setuptools import find_packages, setup
+
+
+def grep_version():
+    path = os.path.join(os.path.dirname(__file__), 'mitogen/__init__.py')
+    with open(path) as fp:
+        for line in fp:
+            if line.startswith('__version__'):
+                _, _, s = line.partition('=')
+                return '.'.join(map(str, eval(s)))
+
+
+setup(
+    name = 'mitogen',
+    version = grep_version(),
+    description = 'Library for writing distributed self-replicating programs.',
+    author = 'David Wilson',
+    license = 'New BSD',
+    url = 'https://github.com/dw/mitogen/',
+    packages = find_packages(exclude=['tests', 'examples']),
+    zip_safe = False,
+    classifiers = [
+        'Environment :: Console',
+        'Intended Audience :: System Administrators',
+        'License :: OSI Approved :: BSD License',
+        'Operating System :: POSIX',
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2.4',
+        'Programming Language :: Python :: 2.5',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: Implementation :: CPython',
+        'Topic :: System :: Distributed Computing',
+        'Topic :: System :: Systems Administration',
+    ],
+)
diff --git a/deployment/roles/mongo_init/tasks/execute_script.yml b/deployment/roles/mongo_init/tasks/execute_script.yml
deleted file mode 100644
index e21930a489748ecc570de8abe3fea463e22e4b8f..0000000000000000000000000000000000000000
--- a/deployment/roles/mongo_init/tasks/execute_script.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-
-- fail: msg="Variable '{{ mongo_file }}' is not defined"
-  when: mongo_file is undefined
-
-- name: 
-  debug:
-    msg: ">>>> Execution of the file {{ mongo_file.finalname }}<<<<"
-
-- name: Check if the script exists
-  stat:
-    path: "{{ mongod_output_dir_entry_point }}/{{ mongo_file.finalname }}"
-  register: stat_result
-
-- block:
-    - fail: msg="The file '{{ mongo_file.finalname }}' is not exist"
-      when: not stat_result.stat.exists
-
-    - name: Get script content.
-      shell: "cat {{ mongod_output_dir_entry_point }}/{{ mongo_file.finalname }}"
-      register: script_content
-    
-    - name: Compute versionned script files
-      template:
-        src: "versioned_script.js.j2"
-        dest: "{{ mongod_output_dir_entry_point }}/{{ mongo_file.finalname }}"
-        owner: "{{ vitamui_defaults.users.vitamuidb }}"
-        group: "{{ vitamui_defaults.users.group }}"
-        mode: 0755        
-
-  when: mongodb.versioning is defined and mongodb.versioning.enable
-     
-- name: Load script in database
-  shell: "mongo \"mongodb://{{ mongod_uri }}/admin\" {{ mongo_credentials }} {{ mongod_output_dir_entry_point }}/{{ mongo_file.finalname }}"
-  no_log: "{{ hide_passwords_during_deploy }}"
-  when: mongodb.docker is not defined or not mongodb.docker.enable
-
-- name: Load script in database (docker)
-  command: "docker exec --tty {{ mongodb.docker.image_name }} /bin/bash -c \"mongo \\\"mongodb://{{ mongod_uri }}/admin\\\" {{ mongo_credentials }} {{ mongodb.docker.internal_dir}}/app/mongod/{{ mongo_file.finalname }}\""
-  no_log: "{{ hide_passwords_during_deploy }}"
-  when: mongodb.docker is defined and mongodb.docker.enable
diff --git a/deployment/roles/mongo_init/tasks/main.yml b/deployment/roles/mongo_init/tasks/main.yml
index fb02030c8a50bc6c60fb4d58832cea813e9efdad..ab8ea65add6716423687aaf9dcdec8ed0f1daa1c 100644
--- a/deployment/roles/mongo_init/tasks/main.yml
+++ b/deployment/roles/mongo_init/tasks/main.yml
@@ -78,8 +78,33 @@
     mode: 0755
   loop: "{{ mongod_eligible_files | unique }}"
 
-- name: "Execute file"
-  include_tasks: "execute_script.yml"
+- name: "Prepare file"
+  include_tasks: "prepare_script.yml"
+  when: mongodb.versioning is defined and mongodb.versioning.enable
   loop: "{{ mongod_eligible_files | unique }}"
   loop_control:
     loop_var: mongo_file
+
+- name: Compute main script file
+  template:
+    src: "main_script.js.j2"
+    dest: "{{ mongod_output_dir_entry_point }}/main_script.js"
+    owner: "{{ vitamui_defaults.users.vitamuidb }}"
+    group: "{{ vitamui_defaults.users.group }}"
+    mode: 0755
+
+- name: Load script in database
+  shell: "mongo \"mongodb://{{ mongod_uri }}/admin\" {{ mongo_credentials }} {{ mongod_output_dir_entry_point }}/main_script.js"
+  no_log: "{{ hide_passwords_during_deploy }}"
+  when: mongodb.docker is not defined or not mongodb.docker.enable
+
+- name: Load script in database (docker)
+  command: "docker exec --tty {{ mongodb.docker.image_name }} /bin/bash -c \"mongo \\\"mongodb://{{ mongod_uri }}/admin\\\" {{ mongo_credentials }} {{ mongodb.docker.internal_dir }}/app/mongod/main_script.js\""
+  no_log: "{{ hide_passwords_during_deploy }}"
+  when: mongodb.docker is defined and mongodb.docker.enable
+
+# Note: the former per-script "Execute file" loop (execute_script.yml) was
+# replaced by the flow above: each eligible script is optionally versioned by
+# prepare_script.yml, then all scripts are executed in a single mongo shell
+# invocation through the generated main_script.js, which load()s them in
+# order.
diff --git a/deployment/roles/mongo_init/tasks/prepare_script.yml b/deployment/roles/mongo_init/tasks/prepare_script.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c37870479068bc85306d262dc9a0c37bcf9dafd8
--- /dev/null
+++ b/deployment/roles/mongo_init/tasks/prepare_script.yml
@@ -0,0 +1,29 @@
+---
+
+- fail: msg="Variable '{{ mongo_file }}' is not defined"
+  when: mongo_file is undefined
+
+- name: Display the script being prepared
+  debug:
+    msg: ">>>> Preparing the file {{ mongo_file.finalname }} <<<<"
+
+- name: Check if the script exists
+  stat:
+    path: "{{ mongod_output_dir_entry_point }}/{{ mongo_file.finalname }}"
+  register: stat_result
+
+- fail: msg="The file '{{ mongo_file.finalname }}' does not exist"
+  when: not stat_result.stat.exists
+
+- name: Get script content.
+  shell: "cat {{ mongod_output_dir_entry_point }}/{{ mongo_file.finalname }}"
+  register: script_content
+
+- name: Compute versioned script files
+  template:
+    src: "versioned_script.js.j2"
+    dest: "{{ mongod_output_dir_entry_point }}/{{ mongo_file.finalname }}"
+    owner: "{{ vitamui_defaults.users.vitamuidb }}"
+    group: "{{ vitamui_defaults.users.group }}"
+    mode: 0755
+
diff --git a/deployment/roles/mongo_init/templates/main_script.js.j2 b/deployment/roles/mongo_init/templates/main_script.js.j2
new file mode 100644
index 0000000000000000000000000000000000000000..11983d1622a532ba2dd5e19fc20fb960e3610c59
--- /dev/null
+++ b/deployment/roles/mongo_init/templates/main_script.js.j2
@@ -0,0 +1,7 @@
+{% for script in mongod_eligible_files %}
+  {% if mongodb.docker is defined and mongodb.docker.enable %}
+    load('{{ mongodb.docker.internal_dir }}/app/mongod/{{ script.finalname }}');
+  {% else %}
+    load('{{ mongod_output_dir_entry_point }}/{{ script.finalname }}');
+  {% endif %}
+{% endfor %}
diff --git a/tools/docker/mongo/ansible.cfg b/tools/docker/mongo/ansible.cfg
index 9e9c5f41eb71f4b3b4f4606f9229cd03196cc7b8..e984571cb2f44a5d4664d5559257feb3f41a2f5d 100644
--- a/tools/docker/mongo/ansible.cfg
+++ b/tools/docker/mongo/ansible.cfg
@@ -9,5 +9,9 @@ filter_plugins =  ./../../../deployment/filter_plugins/
 # This is 2 hours
 fact_caching_timeout = 7200
 
+host_key_checking = False
+strategy_plugins = ../../../deployment/lib/mitogen-0.2.9/ansible_mitogen/plugins/strategy
+strategy = mitogen_linear
+
 [ssh_connection]
 pipelining = True