Fixed .gitignore, switched the TAP Dockerfile to the docker branch with a gulp build step, and removed the checked-in env2 virtualenv
diff --git a/.gitignore b/.gitignore
index 8272c5c..057e14f 100755
--- a/.gitignore
+++ b/.gitignore
@@ -34,3 +34,6 @@
 /public/neon_old/*
 
 env/*
+env2/*
+userale
+es
\ No newline at end of file
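Note: the new ignore entries keep local working artifacts out of version control. env2/* matches the checked-in virtualenv that this same commit deletes below; userale and es presumably cover a local Apache UserALE checkout and an Elasticsearch data directory (an assumption, the diff itself does not say what they hold).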
diff --git a/docker/tap/Dockerfile b/docker/tap/Dockerfile
index b4853ed..7445054 100644
--- a/docker/tap/Dockerfile
+++ b/docker/tap/Dockerfile
@@ -40,7 +40,7 @@
   git
 
 # Clone TAP
-RUN git clone -b master https://github.com/apache/incubator-senssoft-tap.git app
+RUN git clone -b docker https://github.com/apache/incubator-senssoft-tap.git app
 WORKDIR /usr/src/app
 RUN git pull
 
@@ -55,11 +55,9 @@
 ADD neon_counts.js /usr/src/app/public
 ADD neon_graph.js /usr/src/app/public
 
-# Export port
-EXPOSE 8000
-
-# Migrate for django
-#RUN python manage.py migrate
 
 # Startup Application
-RUN gulp dev
+RUN gulp build
+
+# Export port
+EXPOSE 8000
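Note: a RUN step must exit before the image build can continue. "gulp dev" conventionally starts a long-running watch/serve task that would stall "docker build", while "gulp build" emits the assets and terminates, which is presumably why the startup step changed. Relocating EXPOSE 8000 after the build is cosmetic, since EXPOSE is metadata and its position in the Dockerfile does not affect behavior.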
diff --git a/env2/.Python b/env2/.Python
deleted file mode 120000
index cc24a1e..0000000
--- a/env2/.Python
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/Python
\ No newline at end of file
diff --git a/env2/bin/activate b/env2/bin/activate
deleted file mode 100644
index 16b1850..0000000
--- a/env2/bin/activate
+++ /dev/null
@@ -1,78 +0,0 @@
-# This file must be used with "source bin/activate" *from bash*
-# you cannot run it directly
-
-deactivate () {
-    unset -f pydoc >/dev/null 2>&1
-
-    # reset old environment variables
-    # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
-    if ! [ -z "${_OLD_VIRTUAL_PATH+_}" ] ; then
-        PATH="$_OLD_VIRTUAL_PATH"
-        export PATH
-        unset _OLD_VIRTUAL_PATH
-    fi
-    if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
-        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
-        export PYTHONHOME
-        unset _OLD_VIRTUAL_PYTHONHOME
-    fi
-
-    # This should detect bash and zsh, which have a hash command that must
-    # be called to get it to forget past commands.  Without forgetting
-    # past commands the $PATH changes we made may not be respected
-    if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
-        hash -r 2>/dev/null
-    fi
-
-    if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
-        PS1="$_OLD_VIRTUAL_PS1"
-        export PS1
-        unset _OLD_VIRTUAL_PS1
-    fi
-
-    unset VIRTUAL_ENV
-    if [ ! "${1-}" = "nondestructive" ] ; then
-    # Self destruct!
-        unset -f deactivate
-    fi
-}
-
-# unset irrelevant variables
-deactivate nondestructive
-
-VIRTUAL_ENV="/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2"
-export VIRTUAL_ENV
-
-_OLD_VIRTUAL_PATH="$PATH"
-PATH="$VIRTUAL_ENV/bin:$PATH"
-export PATH
-
-# unset PYTHONHOME if set
-if ! [ -z "${PYTHONHOME+_}" ] ; then
-    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
-    unset PYTHONHOME
-fi
-
-if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
-    _OLD_VIRTUAL_PS1="$PS1"
-    if [ "x" != x ] ; then
-        PS1="$PS1"
-    else
-        PS1="(`basename \"$VIRTUAL_ENV\"`) $PS1"
-    fi
-    export PS1
-fi
-
-# Make sure to unalias pydoc if it's already there
-alias pydoc 2>/dev/null >/dev/null && unalias pydoc
-
-pydoc () {
-    python -m pydoc "$@"
-}
-
-# This should detect bash and zsh, which have a hash command that must
-# be called to get it to forget past commands.  Without forgetting
-# past commands the $PATH changes we made may not be respected
-if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
-    hash -r 2>/dev/null
-fi
diff --git a/env2/bin/activate.csh b/env2/bin/activate.csh
deleted file mode 100644
index 5462cbb..0000000
--- a/env2/bin/activate.csh
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file must be used with "source bin/activate.csh" *from csh*.
-# You cannot run it directly.
-# Created by Davide Di Blasi <davidedb@gmail.com>.
-
-alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'
-
-# Unset irrelevant variables.
-deactivate nondestructive
-
-setenv VIRTUAL_ENV "/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2"
-
-set _OLD_VIRTUAL_PATH="$PATH"
-setenv PATH "$VIRTUAL_ENV/bin:$PATH"
-
-
-
-if ("" != "") then
-    set env_name = ""
-else
-    set env_name = `basename "$VIRTUAL_ENV"`
-endif
-
-# Could be in a non-interactive environment,
-# in which case, $prompt is undefined and we wouldn't
-# care about the prompt anyway.
-if ( $?prompt ) then
-    set _OLD_VIRTUAL_PROMPT="$prompt"
-    set prompt = "[$env_name] $prompt"
-endif
-
-unset env_name
-
-alias pydoc python -m pydoc
-
-rehash
-
diff --git a/env2/bin/activate.fish b/env2/bin/activate.fish
deleted file mode 100644
index 93280d9..0000000
--- a/env2/bin/activate.fish
+++ /dev/null
@@ -1,76 +0,0 @@
-# This file must be used using `. bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
-# Do not run it directly.
-
-function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
-    # reset old environment variables
-    if test -n "$_OLD_VIRTUAL_PATH"
-        set -gx PATH $_OLD_VIRTUAL_PATH
-        set -e _OLD_VIRTUAL_PATH
-    end
-
-    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
-        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
-        set -e _OLD_VIRTUAL_PYTHONHOME
-    end
-
-    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
-        # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
-        set -l fish_function_path
-
-        # Erase virtualenv's `fish_prompt` and restore the original.
-        functions -e fish_prompt
-        functions -c _old_fish_prompt fish_prompt
-        functions -e _old_fish_prompt
-        set -e _OLD_FISH_PROMPT_OVERRIDE
-    end
-
-    set -e VIRTUAL_ENV
-
-    if test "$argv[1]" != 'nondestructive'
-        # Self-destruct!
-        functions -e pydoc
-        functions -e deactivate
-    end
-end
-
-# Unset irrelevant variables.
-deactivate nondestructive
-
-set -gx VIRTUAL_ENV "/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2"
-
-set -gx _OLD_VIRTUAL_PATH $PATH
-set -gx PATH "$VIRTUAL_ENV/bin" $PATH
-
-# Unset `$PYTHONHOME` if set.
-if set -q PYTHONHOME
-    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
-    set -e PYTHONHOME
-end
-
-function pydoc
-    python -m pydoc $argv
-end
-
-if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
-    # Copy the current `fish_prompt` function as `_old_fish_prompt`.
-    functions -c fish_prompt _old_fish_prompt
-
-    function fish_prompt
-        # Save the current $status, for fish_prompts that display it.
-        set -l old_status $status
-
-        # Prompt override provided?
-        # If not, just prepend the environment name.
-        if test -n ""
-            printf '%s%s' "" (set_color normal)
-        else
-            printf '%s(%s) ' (set_color normal) (basename "$VIRTUAL_ENV")
-        end
-
-        # Restore the original $status
-        echo "exit $old_status" | source
-        _old_fish_prompt
-    end
-
-    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
-end
diff --git a/env2/bin/activate_this.py b/env2/bin/activate_this.py
deleted file mode 100644
index f18193b..0000000
--- a/env2/bin/activate_this.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""By using execfile(this_file, dict(__file__=this_file)) you will
-activate this virtualenv environment.
-
-This can be used when you must use an existing Python interpreter, not
-the virtualenv bin/python
-"""
-
-try:
-    __file__
-except NameError:
-    raise AssertionError(
-        "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
-import sys
-import os
-
-old_os_path = os.environ.get('PATH', '')
-os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
-base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-if sys.platform == 'win32':
-    site_packages = os.path.join(base, 'Lib', 'site-packages')
-else:
-    site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
-prev_sys_path = list(sys.path)
-import site
-site.addsitedir(site_packages)
-sys.real_prefix = sys.prefix
-sys.prefix = base
-# Move the added items to the front of the path:
-new_sys_path = []
-for item in list(sys.path):
-    if item not in prev_sys_path:
-        new_sys_path.append(item)
-        sys.path.remove(item)
-sys.path[:0] = new_sys_path
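Note: the deleted activate_this.py was meant to be run from inside an existing interpreter rather than from a shell. A minimal usage sketch per its own docstring (Python 2; the path is a hypothetical placeholder):

    # Hypothetical path to the script inside the virtualenv.
    activate_this = '/path/to/env2/bin/activate_this.py'
    # execfile() runs it in-process, putting env2's site-packages first on sys.path.
    execfile(activate_this, dict(__file__=activate_this))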
diff --git a/env2/bin/docker-compose b/env2/bin/docker-compose
deleted file mode 100755
index be103b6..0000000
--- a/env2/bin/docker-compose
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from compose.cli.main import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/env2/bin/easy_install b/env2/bin/easy_install
deleted file mode 100755
index b00ebdf..0000000
--- a/env2/bin/easy_install
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from setuptools.command.easy_install import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/env2/bin/easy_install-2.7 b/env2/bin/easy_install-2.7
deleted file mode 100755
index b00ebdf..0000000
--- a/env2/bin/easy_install-2.7
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from setuptools.command.easy_install import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/env2/bin/jsonschema b/env2/bin/jsonschema
deleted file mode 100755
index fb4d2bf..0000000
--- a/env2/bin/jsonschema
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from jsonschema.cli import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/env2/bin/pip b/env2/bin/pip
deleted file mode 100755
index a6f8d10..0000000
--- a/env2/bin/pip
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/env2/bin/pip2 b/env2/bin/pip2
deleted file mode 100755
index a6f8d10..0000000
--- a/env2/bin/pip2
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/env2/bin/pip2.7 b/env2/bin/pip2.7
deleted file mode 100755
index a6f8d10..0000000
--- a/env2/bin/pip2.7
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/env2/bin/python b/env2/bin/python
deleted file mode 100755
index 947e565..0000000
--- a/env2/bin/python
+++ /dev/null
Binary files differ
diff --git a/env2/bin/python-config b/env2/bin/python-config
deleted file mode 100755
index eb32b1c..0000000
--- a/env2/bin/python-config
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-import sys
-import getopt
-import sysconfig
-
-valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
-              'ldflags', 'help']
-
-if sys.version_info >= (3, 2):
-    valid_opts.insert(-1, 'extension-suffix')
-    valid_opts.append('abiflags')
-if sys.version_info >= (3, 3):
-    valid_opts.append('configdir')
-
-
-def exit_with_usage(code=1):
-    sys.stderr.write("Usage: {0} [{1}]\n".format(
-        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
-    sys.exit(code)
-
-try:
-    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
-except getopt.error:
-    exit_with_usage()
-
-if not opts:
-    exit_with_usage()
-
-pyver = sysconfig.get_config_var('VERSION')
-getvar = sysconfig.get_config_var
-
-opt_flags = [flag for (flag, val) in opts]
-
-if '--help' in opt_flags:
-    exit_with_usage(code=0)
-
-for opt in opt_flags:
-    if opt == '--prefix':
-        print(sysconfig.get_config_var('prefix'))
-
-    elif opt == '--exec-prefix':
-        print(sysconfig.get_config_var('exec_prefix'))
-
-    elif opt in ('--includes', '--cflags'):
-        flags = ['-I' + sysconfig.get_path('include'),
-                 '-I' + sysconfig.get_path('platinclude')]
-        if opt == '--cflags':
-            flags.extend(getvar('CFLAGS').split())
-        print(' '.join(flags))
-
-    elif opt in ('--libs', '--ldflags'):
-        abiflags = getattr(sys, 'abiflags', '')
-        libs = ['-lpython' + pyver + abiflags]
-        libs += getvar('LIBS').split()
-        libs += getvar('SYSLIBS').split()
-        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
-        # shared library in prefix/lib/.
-        if opt == '--ldflags':
-            if not getvar('Py_ENABLE_SHARED'):
-                libs.insert(0, '-L' + getvar('LIBPL'))
-            if not getvar('PYTHONFRAMEWORK'):
-                libs.extend(getvar('LINKFORSHARED').split())
-        print(' '.join(libs))
-
-    elif opt == '--extension-suffix':
-        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
-        if ext_suffix is None:
-            ext_suffix = sysconfig.get_config_var('SO')
-        print(ext_suffix)
-
-    elif opt == '--abiflags':
-        if not getattr(sys, 'abiflags', None):
-            exit_with_usage()
-        print(sys.abiflags)
-
-    elif opt == '--configdir':
-        print(sysconfig.get_config_var('LIBPL'))
diff --git a/env2/bin/python2 b/env2/bin/python2
deleted file mode 120000
index d8654aa..0000000
--- a/env2/bin/python2
+++ /dev/null
@@ -1 +0,0 @@
-python
\ No newline at end of file
diff --git a/env2/bin/python2.7 b/env2/bin/python2.7
deleted file mode 120000
index d8654aa..0000000
--- a/env2/bin/python2.7
+++ /dev/null
@@ -1 +0,0 @@
-python
\ No newline at end of file
diff --git a/env2/bin/wheel b/env2/bin/wheel
deleted file mode 100755
index 5ba3210..0000000
--- a/env2/bin/wheel
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from wheel.tool import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/env2/bin/wsdump.py b/env2/bin/wsdump.py
deleted file mode 100755
index 6a57857..0000000
--- a/env2/bin/wsdump.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/Users/aqv3407/Documents/TAPRepos/dockersetup/incubator-senssoft-tap/env2/bin/python
-
-import argparse
-import code
-import six
-import sys
-import threading
-import time
-import websocket
-from six.moves.urllib.parse import urlparse
-try:
-    import readline
-except:
-    pass
-
-
-def get_encoding():
-    encoding = getattr(sys.stdin, "encoding", "")
-    if not encoding:
-        return "utf-8"
-    else:
-        return encoding.lower()
-
-
-OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
-ENCODING = get_encoding()
-
-
-class VAction(argparse.Action):
-    def __call__(self, parser, args, values, option_string=None):
-        if values==None:
-            values = "1"
-        try:
-            values = int(values)
-        except ValueError:
-            values = values.count("v")+1
-        setattr(args, self.dest, values)
-
-def parse_args():
-    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
-    parser.add_argument("url", metavar="ws_url",
-                        help="websocket url. ex. ws://echo.websocket.org/")
-    parser.add_argument("-p", "--proxy",
-                        help="proxy url. ex. http://127.0.0.1:8080")
-    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
-                        dest="verbose",
-                        help="set verbose mode. If set to 1, show opcode. "
-                        "If set to 2, enable to trace  websocket module")
-    parser.add_argument("-n", "--nocert", action='store_true',
-                        help="Ignore invalid SSL cert")
-    parser.add_argument("-r", "--raw", action="store_true",
-                        help="raw output")
-    parser.add_argument("-s", "--subprotocols", nargs='*',
-                        help="Set subprotocols")
-    parser.add_argument("-o", "--origin",
-                        help="Set origin")
-    parser.add_argument("--eof-wait", default=0, type=int,
-                        help="wait time(second) after 'EOF' received.")
-    parser.add_argument("-t", "--text",
-                        help="Send initial text")
-    parser.add_argument("--timings", action="store_true",
-                        help="Print timings in seconds")
-
-    return parser.parse_args()
-
-class RawInput():
-    def raw_input(self, prompt):
-        if six.PY3:
-            line = input(prompt)
-        else:
-            line = raw_input(prompt)
-
-        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
-            line = line.decode(ENCODING).encode("utf-8")
-        elif isinstance(line, six.text_type):
-            line = line.encode("utf-8")
-
-        return line
-
-class InteractiveConsole(RawInput, code.InteractiveConsole):
-    def write(self, data):
-        sys.stdout.write("\033[2K\033[E")
-        # sys.stdout.write("\n")
-        sys.stdout.write("\033[34m< " + data + "\033[39m")
-        sys.stdout.write("\n> ")
-        sys.stdout.flush()
-
-    def read(self):
-        return self.raw_input("> ")
-
-class NonInteractive(RawInput):
-    def write(self, data):
-        sys.stdout.write(data)
-        sys.stdout.write("\n")
-        sys.stdout.flush()
-
-    def read(self):
-        return self.raw_input("")
-
-def main():
-    start_time = time.time()
-    args = parse_args()
-    if args.verbose > 1:
-        websocket.enableTrace(True)
-    options = {}
-    if (args.proxy):
-        p = urlparse(args.proxy)
-        options["http_proxy_host"] = p.hostname
-        options["http_proxy_port"] = p.port
-    if (args.origin):
-        options["origin"] = args.origin
-    if (args.subprotocols):
-        options["subprotocols"] = args.subprotocols
-    opts = {}
-    if (args.nocert):
-        opts = { "cert_reqs": websocket.ssl.CERT_NONE, "check_hostname": False }
-    ws = websocket.create_connection(args.url, sslopt=opts, **options)
-    if args.raw:
-        console = NonInteractive()
-    else:
-        console = InteractiveConsole()
-        print("Press Ctrl+C to quit")
-
-    def recv():
-        try:
-            frame = ws.recv_frame()
-        except websocket.WebSocketException:
-            return (websocket.ABNF.OPCODE_CLOSE, None)
-        if not frame:
-            raise websocket.WebSocketException("Not a valid frame %s" % frame)
-        elif frame.opcode in OPCODE_DATA:
-            return (frame.opcode, frame.data)
-        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
-            ws.send_close()
-            return (frame.opcode, None)
-        elif frame.opcode == websocket.ABNF.OPCODE_PING:
-            ws.pong(frame.data)
-            return frame.opcode, frame.data
-
-        return frame.opcode, frame.data
-
-
-    def recv_ws():
-        while True:
-            opcode, data = recv()
-            msg = None
-            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
-                data = str(data, "utf-8")
-            if not args.verbose and opcode in OPCODE_DATA:
-                msg = data
-            elif args.verbose:
-                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
-
-            if msg is not None:
-                if (args.timings):
-                    console.write(str(time.time() - start_time) + ": " + msg)
-                else:
-                    console.write(msg)
-
-            if opcode == websocket.ABNF.OPCODE_CLOSE:
-                break
-
-    thread = threading.Thread(target=recv_ws)
-    thread.daemon = True
-    thread.start()
-
-    if args.text:
-        ws.send(args.text)
-
-    while True:
-        try:
-            message = console.read()
-            ws.send(message)
-        except KeyboardInterrupt:
-            return
-        except EOFError:
-            time.sleep(args.eof_wait)
-            return
-
-
-if __name__ == "__main__":
-    try:
-        main()
-    except Exception as e:
-        print(e)
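Note: wsdump.py is a thin interactive wrapper over the websocket-client package it imports. A minimal sketch of the same round trip done directly with that package (echo URL taken from the script's own help text):

    import websocket  # the websocket-client package imported above

    # Open a connection, send one text frame, and print the echoed reply.
    ws = websocket.create_connection("ws://echo.websocket.org/")
    ws.send("hello")
    print(ws.recv())
    ws.close()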
diff --git a/env2/include/python2.7 b/env2/include/python2.7
deleted file mode 120000
index 3fe034f..0000000
--- a/env2/include/python2.7
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7
\ No newline at end of file
diff --git a/env2/lib/python2.7/UserDict.py b/env2/lib/python2.7/UserDict.py
deleted file mode 120000
index b735f02..0000000
--- a/env2/lib/python2.7/UserDict.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/UserDict.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/_abcoll.py b/env2/lib/python2.7/_abcoll.py
deleted file mode 120000
index 4a595bc..0000000
--- a/env2/lib/python2.7/_abcoll.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/_abcoll.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/_weakrefset.py b/env2/lib/python2.7/_weakrefset.py
deleted file mode 120000
index b8b09b7..0000000
--- a/env2/lib/python2.7/_weakrefset.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/_weakrefset.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/abc.py b/env2/lib/python2.7/abc.py
deleted file mode 120000
index 87956e5..0000000
--- a/env2/lib/python2.7/abc.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/abc.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/codecs.py b/env2/lib/python2.7/codecs.py
deleted file mode 120000
index b18c8d6..0000000
--- a/env2/lib/python2.7/codecs.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/codecs.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/config b/env2/lib/python2.7/config
deleted file mode 120000
index 88ddfa1..0000000
--- a/env2/lib/python2.7/config
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/config
\ No newline at end of file
diff --git a/env2/lib/python2.7/copy_reg.py b/env2/lib/python2.7/copy_reg.py
deleted file mode 120000
index 8d0265c..0000000
--- a/env2/lib/python2.7/copy_reg.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/copy_reg.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/distutils/__init__.py b/env2/lib/python2.7/distutils/__init__.py
deleted file mode 100644
index 29fc1da..0000000
--- a/env2/lib/python2.7/distutils/__init__.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import os
-import sys
-import warnings 
-import imp
-import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib
-              # Important! To work on pypy, this must be a module that resides in the
-              # lib-python/modified-x.y.z directory
-
-dirname = os.path.dirname
-
-distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
-if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
-    warnings.warn(
-        "The virtualenv distutils package at %s appears to be in the same location as the system distutils?")
-else:
-    __path__.insert(0, distutils_path)
-    real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ('', '', imp.PKG_DIRECTORY))
-    # Copy the relevant attributes
-    try:
-        __revision__ = real_distutils.__revision__
-    except AttributeError:
-        pass
-    __version__ = real_distutils.__version__
-
-from distutils import dist, sysconfig
-
-try:
-    basestring
-except NameError:
-    basestring = str
-
-## patch build_ext (distutils doesn't know how to get the libs directory
-## path on windows - it hardcodes the paths around the patched sys.prefix)
-
-if sys.platform == 'win32':
-    from distutils.command.build_ext import build_ext as old_build_ext
-    class build_ext(old_build_ext):
-        def finalize_options (self):
-            if self.library_dirs is None:
-                self.library_dirs = []
-            elif isinstance(self.library_dirs, basestring):
-                self.library_dirs = self.library_dirs.split(os.pathsep)
-            
-            self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
-            old_build_ext.finalize_options(self)
-            
-    from distutils.command import build_ext as build_ext_module 
-    build_ext_module.build_ext = build_ext
-
-## distutils.dist patches:
-
-old_find_config_files = dist.Distribution.find_config_files
-def find_config_files(self):
-    found = old_find_config_files(self)
-    system_distutils = os.path.join(distutils_path, 'distutils.cfg')
-    #if os.path.exists(system_distutils):
-    #    found.insert(0, system_distutils)
-        # What to call the per-user config file
-    if os.name == 'posix':
-        user_filename = ".pydistutils.cfg"
-    else:
-        user_filename = "pydistutils.cfg"
-    user_filename = os.path.join(sys.prefix, user_filename)
-    if os.path.isfile(user_filename):
-        for item in list(found):
-            if item.endswith('pydistutils.cfg'):
-                found.remove(item)
-        found.append(user_filename)
-    return found
-dist.Distribution.find_config_files = find_config_files
-
-## distutils.sysconfig patches:
-
-old_get_python_inc = sysconfig.get_python_inc
-def sysconfig_get_python_inc(plat_specific=0, prefix=None):
-    if prefix is None:
-        prefix = sys.real_prefix
-    return old_get_python_inc(plat_specific, prefix)
-sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
-sysconfig.get_python_inc = sysconfig_get_python_inc
-
-old_get_python_lib = sysconfig.get_python_lib
-def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
-    if standard_lib and prefix is None:
-        prefix = sys.real_prefix
-    return old_get_python_lib(plat_specific, standard_lib, prefix)
-sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
-sysconfig.get_python_lib = sysconfig_get_python_lib
-
-old_get_config_vars = sysconfig.get_config_vars
-def sysconfig_get_config_vars(*args):
-    real_vars = old_get_config_vars(*args)
-    if sys.platform == 'win32':
-        lib_dir = os.path.join(sys.real_prefix, "libs")
-        if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
-            real_vars['LIBDIR'] = lib_dir # asked for all
-        elif isinstance(real_vars, list) and 'LIBDIR' in args:
-            real_vars = real_vars + [lib_dir] # asked for list
-    return real_vars
-sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
-sysconfig.get_config_vars = sysconfig_get_config_vars
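Note: the comment at the top of this deleted file explains virtualenv's trick for finding the real standard library. A minimal sketch of just that discovery step:

    import os
    import opcode  # never copied into a virtualenv, so it lives in the real stdlib

    # The directory holding opcode.py is the stdlib; distutils sits beside it.
    distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
    print(distutils_path)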
diff --git a/env2/lib/python2.7/distutils/distutils.cfg b/env2/lib/python2.7/distutils/distutils.cfg
deleted file mode 100644
index 1af230e..0000000
--- a/env2/lib/python2.7/distutils/distutils.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# This is a config file local to this virtualenv installation
-# You may include options that will be used by all distutils commands,
-# and by easy_install.  For instance:
-#
-#   [easy_install]
-#   find_links = http://mylocalsite
diff --git a/env2/lib/python2.7/encodings b/env2/lib/python2.7/encodings
deleted file mode 120000
index 8732f85..0000000
--- a/env2/lib/python2.7/encodings
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/encodings
\ No newline at end of file
diff --git a/env2/lib/python2.7/fnmatch.py b/env2/lib/python2.7/fnmatch.py
deleted file mode 120000
index 49b6bc0..0000000
--- a/env2/lib/python2.7/fnmatch.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/fnmatch.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/genericpath.py b/env2/lib/python2.7/genericpath.py
deleted file mode 120000
index 7843bce..0000000
--- a/env2/lib/python2.7/genericpath.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/genericpath.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/lib-dynload b/env2/lib/python2.7/lib-dynload
deleted file mode 120000
index 24c555e..0000000
--- a/env2/lib/python2.7/lib-dynload
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload
\ No newline at end of file
diff --git a/env2/lib/python2.7/linecache.py b/env2/lib/python2.7/linecache.py
deleted file mode 120000
index 1f79a61..0000000
--- a/env2/lib/python2.7/linecache.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/linecache.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/locale.py b/env2/lib/python2.7/locale.py
deleted file mode 120000
index cc8a5a7..0000000
--- a/env2/lib/python2.7/locale.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/locale.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/no-global-site-packages.txt b/env2/lib/python2.7/no-global-site-packages.txt
deleted file mode 100644
index e69de29..0000000
--- a/env2/lib/python2.7/no-global-site-packages.txt
+++ /dev/null
diff --git a/env2/lib/python2.7/ntpath.py b/env2/lib/python2.7/ntpath.py
deleted file mode 120000
index af0bbe7..0000000
--- a/env2/lib/python2.7/ntpath.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ntpath.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/orig-prefix.txt b/env2/lib/python2.7/orig-prefix.txt
deleted file mode 100644
index 2a45120..0000000
--- a/env2/lib/python2.7/orig-prefix.txt
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7
\ No newline at end of file
diff --git a/env2/lib/python2.7/os.py b/env2/lib/python2.7/os.py
deleted file mode 120000
index 04db928..0000000
--- a/env2/lib/python2.7/os.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/os.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/posixpath.py b/env2/lib/python2.7/posixpath.py
deleted file mode 120000
index cc89aa2..0000000
--- a/env2/lib/python2.7/posixpath.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/posixpath.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/re.py b/env2/lib/python2.7/re.py
deleted file mode 120000
index b1a8e65..0000000
--- a/env2/lib/python2.7/re.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py
\ No newline at end of file
diff --git a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/DESCRIPTION.rst b/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/DESCRIPTION.rst
deleted file mode 100644
index 94d55d3..0000000
--- a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-YAML is a data serialization format designed for human readability
-and interaction with scripting languages.  PyYAML is a YAML parser
-and emitter for Python.
-
-PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
-support, capable extension API, and sensible error messages.  PyYAML
-supports standard YAML tags and provides Python-specific tags that
-allow to represent an arbitrary Python object.
-
-PyYAML is applicable for a broad range of tasks from complex
-configuration files to object serialization and persistance.
-
diff --git a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/INSTALLER b/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/METADATA b/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/METADATA
deleted file mode 100644
index 0839366..0000000
--- a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/METADATA
+++ /dev/null
@@ -1,35 +0,0 @@
-Metadata-Version: 2.0
-Name: PyYAML
-Version: 3.12
-Summary: YAML parser and emitter for Python
-Home-page: http://pyyaml.org/wiki/PyYAML
-Author: Kirill Simonov
-Author-email: xi@resolvent.net
-License: MIT
-Download-URL: http://pyyaml.org/download/pyyaml/PyYAML-3.12.tar.gz
-Platform: Any
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Text Processing :: Markup
-
-YAML is a data serialization format designed for human readability
-and interaction with scripting languages.  PyYAML is a YAML parser
-and emitter for Python.
-
-PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
-support, capable extension API, and sensible error messages.  PyYAML
-supports standard YAML tags and provides Python-specific tags that
-allow to represent an arbitrary Python object.
-
-PyYAML is applicable for a broad range of tasks from complex
-configuration files to object serialization and persistance.
-
diff --git a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/RECORD b/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/RECORD
deleted file mode 100644
index d0c14a2..0000000
--- a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/RECORD
+++ /dev/null
@@ -1,41 +0,0 @@
-PyYAML-3.12.dist-info/DESCRIPTION.rst,sha256=4nzkrOwMTYfusIfdRz4-dl_9Blan5axHPKMiVJEOV-4,534
-PyYAML-3.12.dist-info/METADATA,sha256=pLhIwT2X7saN_aEwYCwTMYl0U9KDA4jYEyL140QAOJg,1424
-PyYAML-3.12.dist-info/RECORD,,
-PyYAML-3.12.dist-info/WHEEL,sha256=c72nWt1i7I3ForbdXG35p-8sTv3cN_E1sA3uLvkN51M,110
-PyYAML-3.12.dist-info/metadata.json,sha256=ACNazIcYAAYWEcmweRN5f76IWRewL5YSVmjkqEx4gFo,1013
-PyYAML-3.12.dist-info/top_level.txt,sha256=mBo8NF3j3lG5BeAeV7Eg19Y0FKQnxcXuw9SUEemIwB4,11
-yaml/__init__.py,sha256=qfSzlV4ZwTZZBGZQg7CfDRj4cfVXlk4B7cltM61_ZyU,9776
-yaml/composer.py,sha256=pOjZ5afqNfH22WXyS6xlQCB2PbSrFPjK-qFPOEI76fw,4921
-yaml/constructor.py,sha256=S_Pux76-hgmgtJeJVtSvQ9ynmtEIR2jAx2ljAochKU0,25145
-yaml/cyaml.py,sha256=xK_IxkrRcetZeNwB_wzDAHYCWsumOFfsTlk3CeoM5kQ,3290
-yaml/dumper.py,sha256=ONPYNHirnLm-qCm-h9swnMWzZhncilexboIPRoNdcq4,2719
-yaml/emitter.py,sha256=Xya7zhTX3ykxMAdAgDIedejmLb1Q71W2G4yt4nTSMIM,43298
-yaml/error.py,sha256=7K-NdIv0qNKPKbnXxEg0L_b9K7nYDORr3rzm8_b-iBY,2559
-yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
-yaml/loader.py,sha256=t_WLbw1-iWQ4KT_FUppJu30cFIU-l8NCb7bjoXJoV6A,1132
-yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
-yaml/parser.py,sha256=sgXahZA3DkySYnaC4D_zcl3l2y4Y5R40icWtdwkF_NE,25542
-yaml/reader.py,sha256=hKuxSbid1rSlfKBsshf5qaPwVduaCJA5t5S9Jum6CAA,6746
-yaml/representer.py,sha256=x3F9vDF4iiPit8sR8tgR-kjtotWTzH_Zv9moq0fMtlY,17711
-yaml/resolver.py,sha256=5Z3boiMikL6Qt6fS5Mt8fHym0GxbW7CMT2f2fnD1ZPQ,9122
-yaml/scanner.py,sha256=ft5i4fP9m0MrpKY9N8Xa24H1LqKhwGQXLG1Hd9gCSsk,52446
-yaml/serializer.py,sha256=tRsRwfu5E9fpLU7LY3vBQf2prt77hwnYlMt5dnBJLig,4171
-yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
-PyYAML-3.12.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-yaml/loader.pyc,,
-yaml/__init__.pyc,,
-yaml/reader.pyc,,
-yaml/cyaml.pyc,,
-yaml/resolver.pyc,,
-yaml/constructor.pyc,,
-yaml/scanner.pyc,,
-yaml/dumper.pyc,,
-yaml/serializer.pyc,,
-yaml/nodes.pyc,,
-yaml/events.pyc,,
-yaml/representer.pyc,,
-yaml/error.pyc,,
-yaml/tokens.pyc,,
-yaml/parser.pyc,,
-yaml/composer.pyc,,
-yaml/emitter.pyc,,
diff --git a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/WHEEL b/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/WHEEL
deleted file mode 100644
index 0d9cf7a..0000000
--- a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.29.0)
-Root-Is-Purelib: false
-Tag: cp27-cp27m-macosx_10_11_intel
-
diff --git a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/metadata.json b/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/metadata.json
deleted file mode 100644
index 73f25a1..0000000
--- a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup"], "download_url": "http://pyyaml.org/download/pyyaml/PyYAML-3.12.tar.gz", "extensions": {"python.details": {"contacts": [{"email": "xi@resolvent.net", "name": "Kirill Simonov", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://pyyaml.org/wiki/PyYAML"}}}, "generator": "bdist_wheel (0.29.0)", "license": "MIT", "metadata_version": "2.0", "name": "PyYAML", "platform": "Any", "summary": "YAML parser and emitter for Python", "version": "3.12"}
\ No newline at end of file
diff --git a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/top_level.txt b/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/top_level.txt
deleted file mode 100644
index 7a159e7..0000000
--- a/env2/lib/python2.7/site-packages/PyYAML-3.12.dist-info/top_level.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-yaml
-_yaml
diff --git a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/DESCRIPTION.rst b/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/DESCRIPTION.rst
deleted file mode 100644
index b8800d6..0000000
--- a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,70 +0,0 @@
-The ssl.match_hostname() function from Python 3.5
-=================================================
-
-The Secure Sockets Layer is only actually *secure*
-if you check the hostname in the certificate returned
-by the server to which you are connecting,
-and verify that it matches to hostname
-that you are trying to reach.
-
-But the matching logic, defined in `RFC2818`_,
-can be a bit tricky to implement on your own.
-So the ``ssl`` package in the Standard Library of Python 3.2
-and greater now includes a ``match_hostname()`` function
-for performing this check instead of requiring every application
-to implement the check separately.
-
-This backport brings ``match_hostname()`` to users
-of earlier versions of Python.
-Simply make this distribution a dependency of your package,
-and then use it like this::
-
-    from backports.ssl_match_hostname import match_hostname, CertificateError
-    [...]
-    sslsock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_SSLv23,
-                              cert_reqs=ssl.CERT_REQUIRED, ca_certs=...)
-    try:
-        match_hostname(sslsock.getpeercert(), hostname)
-    except CertificateError, ce:
-        ...
-
-Brandon Craig Rhodes is merely the packager of this distribution;
-the actual code inside comes from Python 3.5 with small changes for
-portability.
-
-
-Requirements
-------------
-
-* If you want to verify hosts match with certificates via ServerAltname
-  IPAddress fields, you need to install the `ipaddress module`_.
-  backports.ssl_match_hostname will continue to work without ipaddress but
-  will only be able to handle ServerAltName DNSName fields, not IPAddress.
-  System packagers (Linux distributions, et al) are encouraged to add
-  this as a hard dependency in their packages.
-
-* If you need to use this on Python versions earlier than 2.6 you will need to
-  install the `ssl module`_.  From Python 2.6 upwards ``ssl`` is included in
-  the Python Standard Library so you do not need to install it separately.
-
-.. _`ipaddress module`:: https://pypi.python.org/pypi/ipaddress
-.. _`ssl module`:: https://pypi.python.org/pypi/ssl
-
-History
--------
-
-* This function was introduced in python-3.2
-* It was updated for python-3.4a1 for a CVE 
-  (backports-ssl_match_hostname-3.4.0.1)
-* It was updated from RFC2818 to RFC 6125 compliance in order to fix another
-  security flaw for python-3.3.3 and python-3.4a5
-  (backports-ssl_match_hostname-3.4.0.2)
-* It was updated in python-3.5 to handle IPAddresses in ServerAltName fields
-  (something that backports.ssl_match_hostname will do if you also install the
-  ipaddress library from pypi).
-
-
-.. _RFC2818: http://tools.ietf.org/html/rfc2818.html
-
-
-
diff --git a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/INSTALLER b/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/METADATA b/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/METADATA
deleted file mode 100644
index 550988e..0000000
--- a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/METADATA
+++ /dev/null
@@ -1,90 +0,0 @@
-Metadata-Version: 2.0
-Name: backports.ssl-match-hostname
-Version: 3.5.0.1
-Summary: The ssl.match_hostname() function from Python 3.5
-Home-page: http://bitbucket.org/brandon/backports.ssl_match_hostname
-Author: Toshio Kuratomi
-Author-email: toshio@fedoraproject.org
-License: Python Software Foundation License
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: License :: OSI Approved :: Python Software Foundation License
-Classifier: Programming Language :: Python :: 2.4
-Classifier: Programming Language :: Python :: 2.5
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.0
-Classifier: Programming Language :: Python :: 3.1
-Classifier: Topic :: Security :: Cryptography
-
-The ssl.match_hostname() function from Python 3.5
-=================================================
-
-The Secure Sockets Layer is only actually *secure*
-if you check the hostname in the certificate returned
-by the server to which you are connecting,
-and verify that it matches to hostname
-that you are trying to reach.
-
-But the matching logic, defined in `RFC2818`_,
-can be a bit tricky to implement on your own.
-So the ``ssl`` package in the Standard Library of Python 3.2
-and greater now includes a ``match_hostname()`` function
-for performing this check instead of requiring every application
-to implement the check separately.
-
-This backport brings ``match_hostname()`` to users
-of earlier versions of Python.
-Simply make this distribution a dependency of your package,
-and then use it like this::
-
-    from backports.ssl_match_hostname import match_hostname, CertificateError
-    [...]
-    sslsock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_SSLv23,
-                              cert_reqs=ssl.CERT_REQUIRED, ca_certs=...)
-    try:
-        match_hostname(sslsock.getpeercert(), hostname)
-    except CertificateError, ce:
-        ...
-
-Brandon Craig Rhodes is merely the packager of this distribution;
-the actual code inside comes from Python 3.5 with small changes for
-portability.
-
-
-Requirements
-------------
-
-* If you want to verify hosts match with certificates via ServerAltname
-  IPAddress fields, you need to install the `ipaddress module`_.
-  backports.ssl_match_hostname will continue to work without ipaddress but
-  will only be able to handle ServerAltName DNSName fields, not IPAddress.
-  System packagers (Linux distributions, et al) are encouraged to add
-  this as a hard dependency in their packages.
-
-* If you need to use this on Python versions earlier than 2.6 you will need to
-  install the `ssl module`_.  From Python 2.6 upwards ``ssl`` is included in
-  the Python Standard Library so you do not need to install it separately.
-
-.. _`ipaddress module`:: https://pypi.python.org/pypi/ipaddress
-.. _`ssl module`:: https://pypi.python.org/pypi/ssl
-
-History
--------
-
-* This function was introduced in python-3.2
-* It was updated for python-3.4a1 for a CVE 
-  (backports-ssl_match_hostname-3.4.0.1)
-* It was updated from RFC2818 to RFC 6125 compliance in order to fix another
-  security flaw for python-3.3.3 and python-3.4a5
-  (backports-ssl_match_hostname-3.4.0.2)
-* It was updated in python-3.5 to handle IPAddresses in ServerAltName fields
-  (something that backports.ssl_match_hostname will do if you also install the
-  ipaddress library from pypi).
-
-
-.. _RFC2818: http://tools.ietf.org/html/rfc2818.html
-
-
-
diff --git a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/RECORD b/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/RECORD
deleted file mode 100644
index 0b501a8..0000000
--- a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/RECORD
+++ /dev/null
@@ -1,11 +0,0 @@
-backports/__init__.py,sha256=1Mf6P1hIdW75vC992JfI0YyknVYpv1dH3_DZzUShnS0,155
-backports/ssl_match_hostname/__init__.py,sha256=hjwZEix1F_V1qLqTRRxypradJ9tKqTvrjW3FOOvNfXc,5574
-backports.ssl_match_hostname-3.5.0.1.dist-info/DESCRIPTION.rst,sha256=0zIPlb6puJyjxvzmHnKDMmgMASFn8UknYAeEOX02Hb4,2635
-backports.ssl_match_hostname-3.5.0.1.dist-info/METADATA,sha256=qTHOOSgi0oj9E_kwdUFJzj9zJ77v-SCDv0i7k39tiSM,3487
-backports.ssl_match_hostname-3.5.0.1.dist-info/RECORD,,
-backports.ssl_match_hostname-3.5.0.1.dist-info/WHEEL,sha256=BtVfdXUcEYLcFjOkbIrCFRyXU4qszVPt-E9o3RWkSNw,93
-backports.ssl_match_hostname-3.5.0.1.dist-info/metadata.json,sha256=u8N-bnr7EwOGQ2JEy5ytSx7vWWH3aNiu3PhTdanaK4U,965
-backports.ssl_match_hostname-3.5.0.1.dist-info/top_level.txt,sha256=cGjaLMOoBR1FK0ApojtzWVmViTtJ7JGIK_HwXiEsvtU,10
-backports.ssl_match_hostname-3.5.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-backports/__init__.pyc,,
-backports/ssl_match_hostname/__init__.pyc,,
diff --git a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/WHEEL b/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/WHEEL
deleted file mode 100644
index 5a93381..0000000
--- a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.29.0)
-Root-Is-Purelib: true
-Tag: cp27-none-any
-
diff --git a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/metadata.json b/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/metadata.json
deleted file mode 100644
index 1511aca..0000000
--- a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Python Software Foundation License", "Programming Language :: Python :: 2.4", "Programming Language :: Python :: 2.5", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Topic :: Security :: Cryptography"], "extensions": {"python.details": {"contacts": [{"email": "toshio@fedoraproject.org", "name": "Toshio Kuratomi", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://bitbucket.org/brandon/backports.ssl_match_hostname"}}}, "generator": "bdist_wheel (0.29.0)", "license": "Python Software Foundation License", "metadata_version": "2.0", "name": "backports.ssl-match-hostname", "summary": "The ssl.match_hostname() function from Python 3.5", "version": "3.5.0.1"}
\ No newline at end of file
diff --git a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/top_level.txt b/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/top_level.txt
deleted file mode 100644
index 99d2be5..0000000
--- a/env2/lib/python2.7/site-packages/backports.ssl_match_hostname-3.5.0.1.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-backports
diff --git a/env2/lib/python2.7/site-packages/backports/__init__.py b/env2/lib/python2.7/site-packages/backports/__init__.py
deleted file mode 100644
index 612d328..0000000
--- a/env2/lib/python2.7/site-packages/backports/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# This is a Python "namespace package" http://www.python.org/dev/peps/pep-0382/
-from pkgutil import extend_path
-__path__ = extend_path(__path__, __name__)
diff --git a/env2/lib/python2.7/site-packages/backports/ssl_match_hostname/__init__.py b/env2/lib/python2.7/site-packages/backports/ssl_match_hostname/__init__.py
deleted file mode 100644
index 06538ec..0000000
--- a/env2/lib/python2.7/site-packages/backports/ssl_match_hostname/__init__.py
+++ /dev/null
@@ -1,154 +0,0 @@
-"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
-
-import re
-import sys
-
-# ipaddress has been backported to 2.6+ in pypi.  If it is installed on the
-# system, use it to handle IPAddress ServerAltnames (this was added in
-# python-3.5) otherwise only do DNS matching.  This allows
-# backports.ssl_match_hostname to continue to be used all the way back to
-# python-2.4.
-try:
-    import ipaddress
-except ImportError:
-    ipaddress = None
-
-__version__ = '3.5.0.1'
-
-
-class CertificateError(ValueError):
-    pass
-
-
-def _dnsname_match(dn, hostname, max_wildcards=1):
-    """Matching according to RFC 6125, section 6.4.3
-
-    http://tools.ietf.org/html/rfc6125#section-6.4.3
-    """
-    pats = []
-    if not dn:
-        return False
-
-    # Ported from python3-syntax:
-    # leftmost, *remainder = dn.split(r'.')
-    parts = dn.split(r'.')
-    leftmost = parts[0]
-    remainder = parts[1:]
-
-    wildcards = leftmost.count('*')
-    if wildcards > max_wildcards:
-        # Issue #17980: avoid denials of service by refusing more
-        # than one wildcard per fragment.  A survey of established
-        # policy among SSL implementations showed it to be a
-        # reasonable choice.
-        raise CertificateError(
-            "too many wildcards in certificate DNS name: " + repr(dn))
-
-    # speed up common case w/o wildcards
-    if not wildcards:
-        return dn.lower() == hostname.lower()
-
-    # RFC 6125, section 6.4.3, subitem 1.
-    # The client SHOULD NOT attempt to match a presented identifier in which
-    # the wildcard character comprises a label other than the left-most label.
-    if leftmost == '*':
-        # When '*' is a fragment by itself, it matches a non-empty dotless
-        # fragment.
-        pats.append('[^.]+')
-    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
-        # RFC 6125, section 6.4.3, subitem 3.
-        # The client SHOULD NOT attempt to match a presented identifier
-        # where the wildcard character is embedded within an A-label or
-        # U-label of an internationalized domain name.
-        pats.append(re.escape(leftmost))
-    else:
-        # Otherwise, '*' matches any dotless string, e.g. www*
-        pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
-
-    # add the remaining fragments, ignore any wildcards
-    for frag in remainder:
-        pats.append(re.escape(frag))
-
-    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
-    return pat.match(hostname)
-
-
-def _to_unicode(obj):
-    if isinstance(obj, str) and sys.version_info < (3,):
-        obj = unicode(obj, encoding='ascii', errors='strict')
-    return obj
-
-def _ipaddress_match(ipname, host_ip):
-    """Exact matching of IP addresses.
-
-    RFC 6125 explicitly doesn't define an algorithm for this
-    (section 1.7.2 - "Out of Scope").
-    """
-    # OpenSSL may add a trailing newline to a subjectAltName's IP address
-    # Divergence from upstream: ipaddress can't handle byte str
-    ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
-    return ip == host_ip
-
-
-def match_hostname(cert, hostname):
-    """Verify that *cert* (in decoded format as returned by
-    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
-    rules are followed, but IP addresses are not accepted for *hostname*.
-
-    CertificateError is raised on failure. On success, the function
-    returns nothing.
-    """
-    if not cert:
-        raise ValueError("empty or no certificate, match_hostname needs a "
-                         "SSL socket or SSL context with either "
-                         "CERT_OPTIONAL or CERT_REQUIRED")
-    try:
-        # Divergence from upstream: ipaddress can't handle byte str
-        host_ip = ipaddress.ip_address(_to_unicode(hostname))
-    except ValueError:
-        # Not an IP address (common case)
-        host_ip = None
-    except UnicodeError:
-        # Divergence from upstream: Have to deal with ipaddress not taking
-        # byte strings.  addresses should be all ascii, so we consider it not
-        # an ipaddress in this case
-        host_ip = None
-    except AttributeError:
-        # Divergence from upstream: Make ipaddress library optional
-        if ipaddress is None:
-            host_ip = None
-        else:
-            raise
-    dnsnames = []
-    san = cert.get('subjectAltName', ())
-    for key, value in san:
-        if key == 'DNS':
-            if host_ip is None and _dnsname_match(value, hostname):
-                return
-            dnsnames.append(value)
-        elif key == 'IP Address':
-            if host_ip is not None and _ipaddress_match(value, host_ip):
-                return
-            dnsnames.append(value)
-    if not dnsnames:
-        # The subject is only checked when there is no dNSName entry
-        # in subjectAltName
-        for sub in cert.get('subject', ()):
-            for key, value in sub:
-                # XXX according to RFC 2818, the most specific Common Name
-                # must be used.
-                if key == 'commonName':
-                    if _dnsname_match(value, hostname):
-                        return
-                    dnsnames.append(value)
-    if len(dnsnames) > 1:
-        raise CertificateError("hostname %r "
-            "doesn't match either of %s"
-            % (hostname, ', '.join(map(repr, dnsnames))))
-    elif len(dnsnames) == 1:
-        raise CertificateError("hostname %r "
-            "doesn't match %r"
-            % (hostname, dnsnames[0]))
-    else:
-        raise CertificateError("no appropriate commonName or "
-            "subjectAltName fields were found")
diff --git a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/DESCRIPTION.rst b/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/DESCRIPTION.rst
deleted file mode 100644
index 0387194..0000000
--- a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,264 +0,0 @@
-===============================
-cached-property
-===============================
-
-.. image:: https://img.shields.io/pypi/v/cached-property.svg
-    :target: https://pypi.python.org/pypi/cached-property
-
-.. image:: https://img.shields.io/travis/pydanny/cached-property/master.svg
-        :target: https://travis-ci.org/pydanny/cached-property
-
-
-A decorator for caching properties in classes.
-
-Why?
------
-
-* Makes caching of time- or computation-intensive properties quick and easy.
-* Because I got tired of copy/pasting this code from non-web project to non-web project.
-* I needed something really simple that worked in Python 2 and 3.
-
-How to use it
---------------
-
-Let's define a class with an expensive property. Every time you stay there the
-price goes up by $50!
-
-.. code-block:: python
-
-    class Monopoly(object):
-
-        def __init__(self):
-            self.boardwalk_price = 500
-
-        @property
-        def boardwalk(self):
-            # In reality, this might represent a database call or time
-            # intensive task like calling a third-party API.
-            self.boardwalk_price += 50
-            return self.boardwalk_price
-
-Now run it:
-
-.. code-block:: python
-
-    >>> monopoly = Monopoly()
-    >>> monopoly.boardwalk
-    550
-    >>> monopoly.boardwalk
-    600
-
-Let's convert the boardwalk property into a ``cached_property``.
-
-.. code-block:: python
-
-    from cached_property import cached_property
-
-    class Monopoly(object):
-
-        def __init__(self):
-            self.boardwalk_price = 500
-
-        @cached_property
-        def boardwalk(self):
-            # Again, this is a silly example. Don't worry about it, this is
-            #   just an example for clarity.
-            self.boardwalk_price += 50
-            return self.boardwalk_price
-
-Now when we run it the price stays at $550.
-
-.. code-block:: python
-
-    >>> monopoly = Monopoly()
-    >>> monopoly.boardwalk
-    550
-    >>> monopoly.boardwalk
-    550
-    >>> monopoly.boardwalk
-    550
-
-Why doesn't the value of ``monopoly.boardwalk`` change? Because it's a **cached property**!
-
-Invalidating the Cache
-----------------------
-
-Results of cached functions can be invalidated by outside forces. Let's demonstrate how to force the cache to invalidate:
-
-.. code-block:: python
-
-    >>> monopoly = Monopoly()
-    >>> monopoly.boardwalk
-    550
-    >>> monopoly.boardwalk
-    550
-    >>> # invalidate the cache
-    >>> del monopoly.__dict__['boardwalk']
-    >>> # request the boardwalk property again
-    >>> monopoly.boardwalk
-    600
-    >>> monopoly.boardwalk
-    600
-
-Working with Threads
----------------------
-
-What if a whole bunch of people want to stay at Boardwalk all at once? This means using threads, which
-unfortunately causes problems with the standard ``cached_property``. In this case, switch to using the
-``threaded_cached_property``:
-
-.. code-block:: python
-
-    from time import sleep
-    from cached_property import threaded_cached_property
-
-    class Monopoly(object):
-
-        def __init__(self):
-            self.boardwalk_price = 500
-
-        @threaded_cached_property
-        def boardwalk(self):
-            """threaded_cached_property is really nice for when no one waits
-                for other people to finish their turn and rudely start rolling
-                dice and moving their pieces."""
-
-            sleep(1)
-            self.boardwalk_price += 50
-            return self.boardwalk_price
-
-Now use it:
-
-.. code-block:: python
-
-    >>> from threading import Thread
-    >>> from monopoly import Monopoly
-    >>> monopoly = Monopoly()
-    >>> threads = []
-    >>> for x in range(10):
-    ...     thread = Thread(target=lambda: monopoly.boardwalk)
-    ...     thread.start()
-    ...     threads.append(thread)
-
-    >>> for thread in threads:
-    ...     thread.join()
-
-    >>> assert monopoly.boardwalk == 550
-
-
-Timing out the cache
---------------------
-
-Sometimes you want the price of things to reset after a time. Use the ``ttl``
-versions of ``cached_property`` and ``threaded_cached_property``.
-
-.. code-block:: python
-
-    import random
-    from cached_property import cached_property_with_ttl
-
-    class Monopoly(object):
-
-        @cached_property_with_ttl(ttl=5) # cache invalidates after 5 seconds
-        def dice(self):
-            # I dare the reader to implement a game using this method of 'rolling dice'.
-            return random.randint(2,12)
-
-Now use it:
-
-.. code-block:: python
-
-    >>> monopoly = Monopoly()
-    >>> monopoly.dice
-    10
-    >>> monopoly.dice
-    10
-    >>> from time import sleep
-    >>> sleep(6) # Sleeps long enough to expire the cache
-    >>> monopoly.dice
-    3
-    >>> monopoly.dice
-    3
-
-**Note:** The ``ttl`` tools do not reliably allow the clearing of the cache. This
-is why they are broken out into separate tools. See https://github.com/pydanny/cached-property/issues/16.
-
-Credits
---------
-
-* Pip, Django, Werkzeug, Bottle, Pyramid, and Zope for having their own implementations. This package uses an implementation that matches the Bottle version.
-* Reinout Van Rees for pointing out the `cached_property` decorator to me.
-* My awesome wife `@audreyr`_ who created `cookiecutter`_, which meant rolling this out took me just 15 minutes.
-* @tinche for pointing out the threading issue and providing a solution.
-* @bcho for providing the time-to-expire feature
-
-.. _`@audreyr`: https://github.com/audreyr
-.. _`cookiecutter`: https://github.com/audreyr/cookiecutter
-
-
-
-
-History
--------
-
-1.3.0 (2015-11-24)
-++++++++++++++++++
-
-* Added official support for Python 3.5, thanks to @pydanny and @audreyr
-* Removed confusingly placed lock from example, thanks to @ionelmc
-* Corrected invalidation cache documentation, thanks to @proofit404
-* Updated to latest Travis-CI environment, thanks to @audreyr
-
-1.2.0 (2015-04-28)
-++++++++++++++++++
-
-* Overall code and test refactoring, thanks to @gsakkis
-* Allow the del statement for resetting cached properties with ttl instead of del obj._cache[attr], thanks to @gsakkis.
-* Uncovered a bug in PyPy, https://bitbucket.org/pypy/pypy/issue/2033/attributeerror-object-attribute-is-read, thanks to @gsakkis
-* Fixed threaded_cached_property_with_ttl to actually be thread-safe, thanks to @gsakkis
-
-1.1.0 (2015-04-04)
-++++++++++++++++++
-
-* Regression: As the cache was not always clearing, we've broken out the time to expire feature to its own set of specific tools, thanks to @pydanny
-* Fixed typo in README, thanks to @zoidbergwill
-
-1.0.0 (2015-02-13)
-++++++++++++++++++
-
-* Added time-to-expire feature to ``cached_property`` decorator.
-* **Backwards incompatibility**: Changed ``del monopoly.boardwalk`` to ``del monopoly['boardwalk']`` in order to support the new TTL feature.
-
-0.1.5 (2014-05-20)
-++++++++++++++++++
-
-* Added threading support with new ``threaded_cached_property`` decorator
-* Documented cache invalidation
-* Updated credits
-* Sourced the bottle implementation
-
-0.1.4 (2014-05-17)
-++++++++++++++++++
-
-* Fix the dang-blarged py_modules argument.
-
-0.1.3 (2014-05-17)
-++++++++++++++++++
-
-* Removed import of package into ``setup.py``
-
-0.1.2 (2014-05-17)
-++++++++++++++++++
-
-* Documentation fixes. Not opening up an RTFD instance for this because it's so simple to use.
-
-0.1.1 (2014-05-17)
-++++++++++++++++++
-
-* setup.py fix. Whoops!
-
-0.1.0 (2014-05-17)
-++++++++++++++++++
-
-* First release on PyPI.
-
-
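[Editor's note: as the 1.2.0 history entry above says, the ttl variants honor the del statement. An illustrative sketch, reusing the Monopoly example from this README; not part of the diff.]

    monopoly = Monopoly()
    first = monopoly.dice    # computed and cached together with a timestamp
    del monopoly.dice        # drops the cached (value, timestamp) pair
    second = monopoly.dice   # recomputed immediately; the 5-second ttl restarts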
diff --git a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/INSTALLER b/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/METADATA b/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/METADATA
deleted file mode 100644
index e5764e7..0000000
--- a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/METADATA
+++ /dev/null
@@ -1,286 +0,0 @@
-Metadata-Version: 2.0
-Name: cached-property
-Version: 1.3.0
-Summary: A decorator for caching properties in classes.
-Home-page: https://github.com/pydanny/cached-property
-Author: Daniel Greenfeld
-Author-email: pydanny@gmail.com
-License: BSD
-Keywords: cached-property
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Natural Language :: English
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-
-===============================
-cached-property
-===============================
-
-.. image:: https://img.shields.io/pypi/v/cached-property.svg
-    :target: https://pypi.python.org/pypi/cached-property
-
-.. image:: https://img.shields.io/travis/pydanny/cached-property/master.svg
-        :target: https://travis-ci.org/pydanny/cached-property
-
-
-A decorator for caching properties in classes.
-
-Why?
------
-
-* Makes caching of time- or computation-intensive properties quick and easy.
-* Because I got tired of copy/pasting this code from non-web project to non-web project.
-* I needed something really simple that worked in Python 2 and 3.
-
-How to use it
---------------
-
-Let's define a class with an expensive property. Every time you stay there the
-price goes up by $50!
-
-.. code-block:: python
-
-    class Monopoly(object):
-
-        def __init__(self):
-            self.boardwalk_price = 500
-
-        @property
-        def boardwalk(self):
-            # In reality, this might represent a database call or time
-            # intensive task like calling a third-party API.
-            self.boardwalk_price += 50
-            return self.boardwalk_price
-
-Now run it:
-
-.. code-block:: python
-
-    >>> monopoly = Monopoly()
-    >>> monopoly.boardwalk
-    550
-    >>> monopoly.boardwalk
-    600
-
-Let's convert the boardwalk property into a ``cached_property``.
-
-.. code-block:: python
-
-    from cached_property import cached_property
-
-    class Monopoly(object):
-
-        def __init__(self):
-            self.boardwalk_price = 500
-
-        @cached_property
-        def boardwalk(self):
-            # Again, this is a silly example. Don't worry about it, this is
-            #   just an example for clarity.
-            self.boardwalk_price += 50
-            return self.boardwalk_price
-
-Now when we run it the price stays at $550.
-
-.. code-block:: python
-
-    >>> monopoly = Monopoly()
-    >>> monopoly.boardwalk
-    550
-    >>> monopoly.boardwalk
-    550
-    >>> monopoly.boardwalk
-    550
-
-Why doesn't the value of ``monopoly.boardwalk`` change? Because it's a **cached property**!
-
-Invalidating the Cache
-----------------------
-
-Results of cached functions can be invalidated by outside forces. Let's demonstrate how to force the cache to invalidate:
-
-.. code-block:: python
-
-    >>> monopoly = Monopoly()
-    >>> monopoly.boardwalk
-    550
-    >>> monopoly.boardwalk
-    550
-    >>> # invalidate the cache
-    >>> del monopoly.__dict__['boardwalk']
-    >>> # request the boardwalk property again
-    >>> monopoly.boardwalk
-    600
-    >>> monopoly.boardwalk
-    600
-
-Working with Threads
----------------------
-
-What if a whole bunch of people want to stay at Boardwalk all at once? This means using threads, which
-unfortunately causes problems with the standard ``cached_property``. In this case, switch to using the
-``threaded_cached_property``:
-
-.. code-block:: python
-
-    from time import sleep
-    from cached_property import threaded_cached_property
-
-    class Monopoly(object):
-
-        def __init__(self):
-            self.boardwalk_price = 500
-
-        @threaded_cached_property
-        def boardwalk(self):
-            """threaded_cached_property is really nice for when no one waits
-                for other people to finish their turn and rudely start rolling
-                dice and moving their pieces."""
-
-            sleep(1)
-            self.boardwalk_price += 50
-            return self.boardwalk_price
-
-Now use it:
-
-.. code-block:: python
-
-    >>> from threading import Thread
-    >>> from monopoly import Monopoly
-    >>> monopoly = Monopoly()
-    >>> threads = []
-    >>> for x in range(10):
-    ...     thread = Thread(target=lambda: monopoly.boardwalk)
-    ...     thread.start()
-    ...     threads.append(thread)
-
-    >>> for thread in threads:
-    ...     thread.join()
-
-    >>> assert monopoly.boardwalk == 550
-
-
-Timing out the cache
---------------------
-
-Sometimes you want the price of things to reset after a time. Use the ``ttl``
-versions of ``cached_property`` and ``threaded_cached_property``.
-
-.. code-block:: python
-
-    import random
-    from cached_property import cached_property_with_ttl
-
-    class Monopoly(object):
-
-        @cached_property_with_ttl(ttl=5) # cache invalidates after 5 seconds
-        def dice(self):
-            # I dare the reader to implement a game using this method of 'rolling dice'.
-            return random.randint(2,12)
-
-Now use it:
-
-.. code-block:: python
-
-    >>> monopoly = Monopoly()
-    >>> monopoly.dice
-    10
-    >>> monopoly.dice
-    10
-    >>> from time import sleep
-    >>> sleep(6) # Sleeps long enough to expire the cache
-    >>> monopoly.dice
-    3
-    >>> monopoly.dice
-    3
-
-**Note:** The ``ttl`` tools do not reliably allow the clearing of the cache. This
-is why they are broken out into separate tools. See https://github.com/pydanny/cached-property/issues/16.
-
-Credits
---------
-
-* Pip, Django, Werkzeug, Bottle, Pyramid, and Zope for having their own implementations. This package uses an implementation that matches the Bottle version.
-* Reinout Van Rees for pointing out the `cached_property` decorator to me.
-* My awesome wife `@audreyr`_ who created `cookiecutter`_, which meant rolling this out took me just 15 minutes.
-* @tinche for pointing out the threading issue and providing a solution.
-* @bcho for providing the time-to-expire feature
-
-.. _`@audreyr`: https://github.com/audreyr
-.. _`cookiecutter`: https://github.com/audreyr/cookiecutter
-
-
-
-
-History
--------
-
-1.3.0 (2015-11-24)
-++++++++++++++++++
-
-* Added official support for Python 3.5, thanks to @pydanny and @audreyr
-* Removed confusingly placed lock from example, thanks to @ionelmc
-* Corrected invalidation cache documentation, thanks to @proofit404
-* Updated to latest Travis-CI environment, thanks to @audreyr
-
-1.2.0 (2015-04-28)
-++++++++++++++++++
-
-* Overall code and test refactoring, thanks to @gsakkis
-* Allow the del statement for resetting cached properties with ttl instead of del obj._cache[attr], thanks to @gsakkis.
-* Uncovered a bug in PyPy, https://bitbucket.org/pypy/pypy/issue/2033/attributeerror-object-attribute-is-read, thanks to @gsakkis
-* Fixed threaded_cached_property_with_ttl to actually be thread-safe, thanks to @gsakkis
-
-1.1.0 (2015-04-04)
-++++++++++++++++++
-
-* Regression: As the cache was not always clearing, we've broken out the time to expire feature to its own set of specific tools, thanks to @pydanny
-* Fixed typo in README, thanks to @zoidbergwill
-
-1.0.0 (2015-02-13)
-++++++++++++++++++
-
-* Added time-to-expire feature to ``cached_property`` decorator.
-* **Backwards incompatibility**: Changed ``del monopoly.boardwalk`` to ``del monopoly['boardwalk']`` in order to support the new TTL feature.
-
-0.1.5 (2014-05-20)
-++++++++++++++++++
-
-* Added threading support with new ``threaded_cached_property`` decorator
-* Documented cache invalidation
-* Updated credits
-* Sourced the bottle implementation
-
-0.1.4 (2014-05-17)
-++++++++++++++++++
-
-* Fix the dang-blarged py_modules argument.
-
-0.1.3 (2014-05-17)
-++++++++++++++++++
-
-* Removed import of package into ``setup.py``
-
-0.1.2 (2014-05-17)
-++++++++++++++++++
-
-* Documentation fixes. Not opening up an RTFD instance for this because it's so simple to use.
-
-0.1.1 (2014-05-17)
-++++++++++++++++++
-
-* setup.py fix. Whoops!
-
-0.1.0 (2014-05-17)
-++++++++++++++++++
-
-* First release on PyPI.
-
-
diff --git a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/RECORD b/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/RECORD
deleted file mode 100644
index b764fa9..0000000
--- a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/RECORD
+++ /dev/null
@@ -1,9 +0,0 @@
-cached_property.py,sha256=eL8h26nf5AKH37NqxcFQrCj1n9eWQ-QEUxO0HUXpThQ,3900
-cached_property-1.3.0.dist-info/DESCRIPTION.rst,sha256=CsyB7KnTiNEfAvDLhcg88JtLLedX_mQ94LaRCzZjHDA,7293
-cached_property-1.3.0.dist-info/METADATA,sha256=OF00ShiPHaabB6TussjxZ3bkdulyzCT4wWbwxzDw55M,8114
-cached_property-1.3.0.dist-info/RECORD,,
-cached_property-1.3.0.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110
-cached_property-1.3.0.dist-info/metadata.json,sha256=0Z7QJmhQLcCRzDaIKB3CKSfAQvxriQKDiVG4rJdkgEc,932
-cached_property-1.3.0.dist-info/top_level.txt,sha256=Pst93XQ2enhlPVfCIpc4Kv38f6JXEYuki96hkyh-1JE,16
-cached_property-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-cached_property.pyc,,
diff --git a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/WHEEL b/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/WHEEL
deleted file mode 100644
index 0de529b..0000000
--- a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.26.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/metadata.json b/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/metadata.json
deleted file mode 100644
index 8196b43..0000000
--- a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"generator": "bdist_wheel (0.26.0)", "summary": "A decorator for caching properties in classes.", "classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Natural Language :: English", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.details": {"project_urls": {"Home": "https://github.com/pydanny/cached-property"}, "contacts": [{"email": "pydanny@gmail.com", "name": "Daniel Greenfeld", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "keywords": ["cached-property"], "license": "BSD", "metadata_version": "2.0", "name": "cached-property", "version": "1.3.0"}
\ No newline at end of file
diff --git a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/top_level.txt b/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/top_level.txt
deleted file mode 100644
index 05a3432..0000000
--- a/env2/lib/python2.7/site-packages/cached_property-1.3.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-cached_property
diff --git a/env2/lib/python2.7/site-packages/cached_property.py b/env2/lib/python2.7/site-packages/cached_property.py
deleted file mode 100644
index 6a342d5..0000000
--- a/env2/lib/python2.7/site-packages/cached_property.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-
-__author__ = 'Daniel Greenfeld'
-__email__ = 'pydanny@gmail.com'
-__version__ = '1.3.0'
-__license__ = 'BSD'
-
-from time import time
-import threading
-
-
-class cached_property(object):
-    """
-    A property that is only computed once per instance and then replaces itself
-    with an ordinary attribute. Deleting the attribute resets the property.
-    Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
-    """  # noqa
-
-    def __init__(self, func):
-        self.__doc__ = getattr(func, '__doc__')
-        self.func = func
-
-    def __get__(self, obj, cls):
-        if obj is None:
-            return self
-        value = obj.__dict__[self.func.__name__] = self.func(obj)
-        return value
-
-
-class threaded_cached_property(object):
-    """
-    A cached_property version for use in environments where multiple threads
-    might concurrently try to access the property.
-    """
-
-    def __init__(self, func):
-        self.__doc__ = getattr(func, '__doc__')
-        self.func = func
-        self.lock = threading.RLock()
-
-    def __get__(self, obj, cls):
-        if obj is None:
-            return self
-
-        obj_dict = obj.__dict__
-        name = self.func.__name__
-        with self.lock:
-            try:
-                # check if the value was computed before the lock was acquired
-                return obj_dict[name]
-            except KeyError:
-                # if not, do the calculation and release the lock
-                return obj_dict.setdefault(name, self.func(obj))
-
-
-class cached_property_with_ttl(object):
-    """
-    A property that is only computed once per instance and then replaces itself
-    with an ordinary attribute. Setting the ttl to a number expresses how long
-    the property will last before being timed out.
-    """
-
-    def __init__(self, ttl=None):
-        if callable(ttl):
-            func = ttl
-            ttl = None
-        else:
-            func = None
-        self.ttl = ttl
-        self._prepare_func(func)
-
-    def __call__(self, func):
-        self._prepare_func(func)
-        return self
-
-    def __get__(self, obj, cls):
-        if obj is None:
-            return self
-
-        now = time()
-        obj_dict = obj.__dict__
-        name = self.__name__
-        try:
-            value, last_updated = obj_dict[name]
-        except KeyError:
-            pass
-        else:
-            ttl_expired = self.ttl and self.ttl < now - last_updated
-            if not ttl_expired:
-                return value
-
-        value = self.func(obj)
-        obj_dict[name] = (value, now)
-        return value
-
-    def __delete__(self, obj):
-        obj.__dict__.pop(self.__name__, None)
-
-    def __set__(self, obj, value):
-        obj.__dict__[self.__name__] = (value, time())
-
-    def _prepare_func(self, func):
-        self.func = func
-        if func:
-            self.__doc__ = func.__doc__
-            self.__name__ = func.__name__
-            self.__module__ = func.__module__
-
-# Aliases to make cached_property_with_ttl easier to use
-cached_property_ttl = cached_property_with_ttl
-timed_cached_property = cached_property_with_ttl
-
-
-class threaded_cached_property_with_ttl(cached_property_with_ttl):
-    """
-    A cached_property version for use in environments where multiple threads
-    might concurrently try to access the property.
-    """
-
-    def __init__(self, ttl=None):
-        super(threaded_cached_property_with_ttl, self).__init__(ttl)
-        self.lock = threading.RLock()
-
-    def __get__(self, obj, cls):
-        with self.lock:
-            return super(threaded_cached_property_with_ttl, self).__get__(obj,
-                                                                          cls)
-
-# Alias to make threaded_cached_property_with_ttl easier to use
-threaded_cached_property_ttl = threaded_cached_property_with_ttl
-timed_threaded_cached_property = threaded_cached_property_with_ttl
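[Editor's note: an illustrative sketch of the self-replacing trick used by the deleted cached_property above; the Report class is hypothetical and not part of the diff.]

    class Report(object):
        @cached_property
        def data(self):
            print('computing')   # runs only on the first access
            return [1, 2, 3]

    r = Report()
    r.data                    # prints 'computing'; value lands in r.__dict__
    r.data                    # ordinary attribute lookup now; no recomputation
    del r.__dict__['data']    # invalidates; the next access recomputes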
diff --git a/env2/lib/python2.7/site-packages/compose/GITSHA b/env2/lib/python2.7/site-packages/compose/GITSHA
deleted file mode 100644
index 0b6e76f..0000000
--- a/env2/lib/python2.7/site-packages/compose/GITSHA
+++ /dev/null
@@ -1 +0,0 @@
-94f7016
diff --git a/env2/lib/python2.7/site-packages/compose/__init__.py b/env2/lib/python2.7/site-packages/compose/__init__.py
deleted file mode 100644
index c550f99..0000000
--- a/env2/lib/python2.7/site-packages/compose/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-__version__ = '1.8.0'
diff --git a/env2/lib/python2.7/site-packages/compose/__main__.py b/env2/lib/python2.7/site-packages/compose/__main__.py
deleted file mode 100644
index 27a7acb..0000000
--- a/env2/lib/python2.7/site-packages/compose/__main__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from compose.cli.main import main
-
-main()
diff --git a/env2/lib/python2.7/site-packages/compose/bundle.py b/env2/lib/python2.7/site-packages/compose/bundle.py
deleted file mode 100644
index afbdabf..0000000
--- a/env2/lib/python2.7/site-packages/compose/bundle.py
+++ /dev/null
@@ -1,257 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import json
-import logging
-
-import six
-from docker.utils import split_command
-from docker.utils.ports import split_port
-
-from .cli.errors import UserError
-from .config.serialize import denormalize_config
-from .network import get_network_defs_for_service
-from .service import format_environment
-from .service import NoSuchImageError
-from .service import parse_repository_tag
-
-
-log = logging.getLogger(__name__)
-
-
-SERVICE_KEYS = {
-    'working_dir': 'WorkingDir',
-    'user': 'User',
-    'labels': 'Labels',
-}
-
-IGNORED_KEYS = {'build'}
-
-SUPPORTED_KEYS = {
-    'image',
-    'ports',
-    'expose',
-    'networks',
-    'command',
-    'environment',
-    'entrypoint',
-} | set(SERVICE_KEYS)
-
-VERSION = '0.1'
-
-
-class NeedsPush(Exception):
-    def __init__(self, image_name):
-        self.image_name = image_name
-
-
-class NeedsPull(Exception):
-    def __init__(self, image_name):
-        self.image_name = image_name
-
-
-class MissingDigests(Exception):
-    def __init__(self, needs_push, needs_pull):
-        self.needs_push = needs_push
-        self.needs_pull = needs_pull
-
-
-def serialize_bundle(config, image_digests):
-    return json.dumps(to_bundle(config, image_digests), indent=2, sort_keys=True)
-
-
-def get_image_digests(project, allow_push=False):
-    digests = {}
-    needs_push = set()
-    needs_pull = set()
-
-    for service in project.services:
-        try:
-            digests[service.name] = get_image_digest(
-                service,
-                allow_push=allow_push,
-            )
-        except NeedsPush as e:
-            needs_push.add(e.image_name)
-        except NeedsPull as e:
-            needs_pull.add(e.image_name)
-
-    if needs_push or needs_pull:
-        raise MissingDigests(needs_push, needs_pull)
-
-    return digests
-
-
-def get_image_digest(service, allow_push=False):
-    if 'image' not in service.options:
-        raise UserError(
-            "Service '{s.name}' doesn't define an image tag. An image name is "
-            "required to generate a proper image digest for the bundle. Specify "
-            "an image repo and tag with the 'image' option.".format(s=service))
-
-    _, _, separator = parse_repository_tag(service.options['image'])
-    # Compose file already uses a digest, no lookup required
-    if separator == '@':
-        return service.options['image']
-
-    try:
-        image = service.image()
-    except NoSuchImageError:
-        action = 'build' if 'build' in service.options else 'pull'
-        raise UserError(
-            "Image not found for service '{service}'. "
-            "You might need to run `docker-compose {action} {service}`."
-            .format(service=service.name, action=action))
-
-    if image['RepoDigests']:
-        # TODO: pick a digest based on the image tag if there are multiple
-        # digests
-        return image['RepoDigests'][0]
-
-    if 'build' not in service.options:
-        raise NeedsPull(service.image_name)
-
-    if not allow_push:
-        raise NeedsPush(service.image_name)
-
-    return push_image(service)
-
-
-def push_image(service):
-    try:
-        digest = service.push()
-    except:
-        log.error(
-            "Failed to push image for service '{s.name}'. Please use an "
-            "image tag that can be pushed to a Docker "
-            "registry.".format(s=service))
-        raise
-
-    if not digest:
-        raise ValueError("Failed to get digest for %s" % service.name)
-
-    repo, _, _ = parse_repository_tag(service.options['image'])
-    identifier = '{repo}@{digest}'.format(repo=repo, digest=digest)
-
-    # only do this if RepoDigests isn't already populated
-    image = service.image()
-    if not image['RepoDigests']:
-        # Pull by digest so that image['RepoDigests'] is populated for next time
-        # and we don't have to pull/push again
-        service.client.pull(identifier)
-        log.info("Stored digest for {}".format(service.image_name))
-
-    return identifier
-
-
-def to_bundle(config, image_digests):
-    if config.networks:
-        log.warn("Unsupported top level key 'networks' - ignoring")
-
-    if config.volumes:
-        log.warn("Unsupported top level key 'volumes' - ignoring")
-
-    config = denormalize_config(config)
-
-    return {
-        'Version': VERSION,
-        'Services': {
-            name: convert_service_to_bundle(
-                name,
-                service_dict,
-                image_digests[name],
-            )
-            for name, service_dict in config['services'].items()
-        },
-    }
-
-
-def convert_service_to_bundle(name, service_dict, image_digest):
-    container_config = {'Image': image_digest}
-
-    for key, value in service_dict.items():
-        if key in IGNORED_KEYS:
-            continue
-
-        if key not in SUPPORTED_KEYS:
-            log.warn("Unsupported key '{}' in services.{} - ignoring".format(key, name))
-            continue
-
-        if key == 'environment':
-            container_config['Env'] = format_environment({
-                envkey: envvalue for envkey, envvalue in value.items()
-                if envvalue
-            })
-            continue
-
-        if key in SERVICE_KEYS:
-            container_config[SERVICE_KEYS[key]] = value
-            continue
-
-    set_command_and_args(
-        container_config,
-        service_dict.get('entrypoint', []),
-        service_dict.get('command', []))
-    container_config['Networks'] = make_service_networks(name, service_dict)
-
-    ports = make_port_specs(service_dict)
-    if ports:
-        container_config['Ports'] = ports
-
-    return container_config
-
-
-# See https://github.com/docker/swarmkit/blob//agent/exec/container/container.go#L95
-def set_command_and_args(config, entrypoint, command):
-    if isinstance(entrypoint, six.string_types):
-        entrypoint = split_command(entrypoint)
-    if isinstance(command, six.string_types):
-        command = split_command(command)
-
-    if entrypoint:
-        config['Command'] = entrypoint + command
-        return
-
-    if command:
-        config['Args'] = command
-
-
-def make_service_networks(name, service_dict):
-    networks = []
-
-    for network_name, network_def in get_network_defs_for_service(service_dict).items():
-        for key in network_def.keys():
-            log.warn(
-                "Unsupported key '{}' in services.{}.networks.{} - ignoring"
-                .format(key, name, network_name))
-
-        networks.append(network_name)
-
-    return networks
-
-
-def make_port_specs(service_dict):
-    ports = []
-
-    internal_ports = [
-        internal_port
-        for port_def in service_dict.get('ports', [])
-        for internal_port in split_port(port_def)[0]
-    ]
-
-    internal_ports += service_dict.get('expose', [])
-
-    for internal_port in internal_ports:
-        spec = make_port_spec(internal_port)
-        if spec not in ports:
-            ports.append(spec)
-
-    return ports
-
-
-def make_port_spec(value):
-    components = six.text_type(value).partition('/')
-    return {
-        'Protocol': components[2] or 'tcp',
-        'Port': int(components[0]),
-    }
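[Editor's note: an illustrative sketch, not part of the diff, of what the port helpers at the bottom of the deleted bundle.py return; the values are made up.]

    make_port_spec('8000/udp')  # -> {'Protocol': 'udp', 'Port': 8000}
    make_port_spec(8080)        # -> {'Protocol': 'tcp', 'Port': 8080}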
diff --git a/env2/lib/python2.7/site-packages/compose/cli/__init__.py b/env2/lib/python2.7/site-packages/compose/cli/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/__init__.py
+++ /dev/null
diff --git a/env2/lib/python2.7/site-packages/compose/cli/colors.py b/env2/lib/python2.7/site-packages/compose/cli/colors.py
deleted file mode 100644
index 3c18886..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/colors.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-NAMES = [
-    'grey',
-    'red',
-    'green',
-    'yellow',
-    'blue',
-    'magenta',
-    'cyan',
-    'white'
-]
-
-
-def get_pairs():
-    for i, name in enumerate(NAMES):
-        yield name, str(30 + i)
-        yield 'intense_' + name, str(30 + i) + ';1'
-
-
-def ansi(code):
-    return '\033[{0}m'.format(code)
-
-
-def ansi_color(code, s):
-    return '{0}{1}{2}'.format(ansi(code), s, ansi(0))
-
-
-def make_color_fn(code):
-    return lambda s: ansi_color(code, s)
-
-
-for (name, code) in get_pairs():
-    globals()[name] = make_color_fn(code)
-
-
-def rainbow():
-    cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue',
-          'intense_cyan', 'intense_yellow', 'intense_green',
-          'intense_magenta', 'intense_red', 'intense_blue']
-
-    for c in cs:
-        yield globals()[c]
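[Editor's note: an illustrative sketch, not part of the diff, of the color helpers the loop above injects into the module namespace.]

    red('error')         # -> '\x1b[31merror\x1b[0m'
    intense_green('ok')  # -> '\x1b[32;1mok\x1b[0m'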
diff --git a/env2/lib/python2.7/site-packages/compose/cli/command.py b/env2/lib/python2.7/site-packages/compose/cli/command.py
deleted file mode 100644
index 2c70d31..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/command.py
+++ /dev/null
@@ -1,130 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-import os
-import re
-import ssl
-
-import six
-
-from . import verbose_proxy
-from .. import config
-from ..config.environment import Environment
-from ..const import API_VERSIONS
-from ..project import Project
-from .docker_client import docker_client
-from .docker_client import tls_config_from_options
-from .utils import get_version_info
-
-log = logging.getLogger(__name__)
-
-
-def project_from_options(project_dir, options):
-    environment = Environment.from_env_file(project_dir)
-    host = options.get('--host')
-    if host is not None:
-        host = host.lstrip('=')
-    return get_project(
-        project_dir,
-        get_config_path_from_options(project_dir, options, environment),
-        project_name=options.get('--project-name'),
-        verbose=options.get('--verbose'),
-        host=host,
-        tls_config=tls_config_from_options(options),
-        environment=environment
-    )
-
-
-def get_config_from_options(base_dir, options):
-    environment = Environment.from_env_file(base_dir)
-    config_path = get_config_path_from_options(
-        base_dir, options, environment
-    )
-    return config.load(
-        config.find(base_dir, config_path, environment)
-    )
-
-
-def get_config_path_from_options(base_dir, options, environment):
-    file_option = options.get('--file')
-    if file_option:
-        return file_option
-
-    config_files = environment.get('COMPOSE_FILE')
-    if config_files:
-        return config_files.split(os.pathsep)
-    return None
-
-
-def get_tls_version(environment):
-    compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
-    if not compose_tls_version:
-        return None
-
-    tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
-    if not hasattr(ssl, tls_attr_name):
-        log.warn(
-            'The "{}" protocol is unavailable. You may need to update your '
-            'version of Python or OpenSSL. Falling back to TLSv1 (default).'
-            .format(compose_tls_version)
-        )
-        return None
-
-    return getattr(ssl, tls_attr_name)
-
-
-def get_client(environment, verbose=False, version=None, tls_config=None, host=None,
-               tls_version=None):
-
-    client = docker_client(
-        version=version, tls_config=tls_config, host=host,
-        environment=environment, tls_version=get_tls_version(environment)
-    )
-    if verbose:
-        version_info = six.iteritems(client.version())
-        log.info(get_version_info('full'))
-        log.info("Docker base_url: %s", client.base_url)
-        log.info("Docker version: %s",
-                 ", ".join("%s=%s" % item for item in version_info))
-        return verbose_proxy.VerboseProxy('docker', client)
-    return client
-
-
-def get_project(project_dir, config_path=None, project_name=None, verbose=False,
-                host=None, tls_config=None, environment=None):
-    if not environment:
-        environment = Environment.from_env_file(project_dir)
-    config_details = config.find(project_dir, config_path, environment)
-    project_name = get_project_name(
-        config_details.working_dir, project_name, environment
-    )
-    config_data = config.load(config_details)
-
-    api_version = environment.get(
-        'COMPOSE_API_VERSION',
-        API_VERSIONS[config_data.version])
-
-    client = get_client(
-        verbose=verbose, version=api_version, tls_config=tls_config,
-        host=host, environment=environment
-    )
-
-    return Project.from_config(project_name, config_data, client)
-
-
-def get_project_name(working_dir, project_name=None, environment=None):
-    def normalize_name(name):
-        return re.sub(r'[^a-z0-9]', '', name.lower())
-
-    if not environment:
-        environment = Environment.from_env_file(working_dir)
-    project_name = project_name or environment.get('COMPOSE_PROJECT_NAME')
-    if project_name:
-        return normalize_name(project_name)
-
-    project = os.path.basename(os.path.abspath(working_dir))
-    if project:
-        return normalize_name(project)
-
-    return 'default'
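[Editor's note: an illustrative sketch, not part of the diff, of the normalization done by the deleted get_project_name(); the paths and names are made up.]

    get_project_name('/tmp/My_App-1')          # -> 'myapp1' (basename, lowered, stripped)
    get_project_name('/tmp/x', 'Client Demo')  # -> 'clientdemo' (explicit name wins)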
diff --git a/env2/lib/python2.7/site-packages/compose/cli/docker_client.py b/env2/lib/python2.7/site-packages/compose/cli/docker_client.py
deleted file mode 100644
index ce191fb..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/docker_client.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-
-from docker import Client
-from docker.errors import TLSParameterError
-from docker.tls import TLSConfig
-from docker.utils import kwargs_from_env
-
-from ..const import HTTP_TIMEOUT
-from .errors import UserError
-from .utils import generate_user_agent
-
-log = logging.getLogger(__name__)
-
-
-def tls_config_from_options(options):
-    tls = options.get('--tls', False)
-    ca_cert = options.get('--tlscacert')
-    cert = options.get('--tlscert')
-    key = options.get('--tlskey')
-    verify = options.get('--tlsverify')
-    skip_hostname_check = options.get('--skip-hostname-check', False)
-
-    advanced_opts = any([ca_cert, cert, key, verify])
-
-    if tls is True and not advanced_opts:
-        return True
-    elif advanced_opts:  # --tls is a noop
-        client_cert = None
-        if cert or key:
-            client_cert = (cert, key)
-
-        return TLSConfig(
-            client_cert=client_cert, verify=verify, ca_cert=ca_cert,
-            assert_hostname=False if skip_hostname_check else None
-        )
-
-    return None
-
-
-def docker_client(environment, version=None, tls_config=None, host=None,
-                  tls_version=None):
-    """
-    Returns a docker-py client configured using environment variables
-    according to the same logic as the official Docker client.
-    """
-    try:
-        kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)
-    except TLSParameterError:
-        raise UserError(
-            "TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY "
-            "and DOCKER_CERT_PATH are set correctly.\n"
-            "You might need to run `eval \"$(docker-machine env default)\"`")
-
-    if host:
-        kwargs['base_url'] = host
-    if tls_config:
-        kwargs['tls'] = tls_config
-
-    if version:
-        kwargs['version'] = version
-
-    timeout = environment.get('COMPOSE_HTTP_TIMEOUT')
-    if timeout:
-        kwargs['timeout'] = int(timeout)
-    else:
-        kwargs['timeout'] = HTTP_TIMEOUT
-
-    kwargs['user_agent'] = generate_user_agent()
-
-    return Client(**kwargs)
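[Editor's note: an illustrative sketch, not part of the diff, of building a client from the shell environment with the deleted helper; Environment comes from compose.config.environment.]

    environment = Environment.from_env_file('.')
    client = docker_client(environment)  # honors DOCKER_HOST, DOCKER_TLS_VERIFY, ...
    client.ping()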
diff --git a/env2/lib/python2.7/site-packages/compose/cli/docopt_command.py b/env2/lib/python2.7/site-packages/compose/cli/docopt_command.py
deleted file mode 100644
index 809a4b7..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/docopt_command.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from inspect import getdoc
-
-from docopt import docopt
-from docopt import DocoptExit
-
-
-def docopt_full_help(docstring, *args, **kwargs):
-    try:
-        return docopt(docstring, *args, **kwargs)
-    except DocoptExit:
-        raise SystemExit(docstring)
-
-
-class DocoptDispatcher(object):
-
-    def __init__(self, command_class, options):
-        self.command_class = command_class
-        self.options = options
-
-    def parse(self, argv):
-        command_help = getdoc(self.command_class)
-        options = docopt_full_help(command_help, argv, **self.options)
-        command = options['COMMAND']
-
-        if command is None:
-            raise SystemExit(command_help)
-
-        handler = get_handler(self.command_class, command)
-        docstring = getdoc(handler)
-
-        if docstring is None:
-            raise NoSuchCommand(command, self)
-
-        command_options = docopt_full_help(docstring, options['ARGS'], options_first=True)
-        return options, handler, command_options
-
-
-def get_handler(command_class, command):
-    command = command.replace('-', '_')
-    # we want an "exec" command to mirror the docker client, but "exec" is a
-    # keyword in Python 2 and can't be used as a method name
-    if command == "exec":
-        command = "exec_command"
-
-    if not hasattr(command_class, command):
-        raise NoSuchCommand(command, command_class)
-
-    return getattr(command_class, command)
-
-
-class NoSuchCommand(Exception):
-    def __init__(self, command, supercommand):
-        super(NoSuchCommand, self).__init__("No such command: %s" % command)
-
-        self.command = command
-        self.supercommand = supercommand
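[Editor's note: an illustrative sketch, not part of the diff, of the dispatch rule above; TopLevelCommand is defined in compose/cli/main.py further down.]

    get_handler(TopLevelCommand, 'up')    # -> TopLevelCommand.up
    get_handler(TopLevelCommand, 'exec')  # -> TopLevelCommand.exec_command
    get_handler(TopLevelCommand, 'nope')  # raises NoSuchCommand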
diff --git a/env2/lib/python2.7/site-packages/compose/cli/errors.py b/env2/lib/python2.7/site-packages/compose/cli/errors.py
deleted file mode 100644
index 5af3ede..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/errors.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import contextlib
-import logging
-import socket
-from textwrap import dedent
-
-from docker.errors import APIError
-from requests.exceptions import ConnectionError as RequestsConnectionError
-from requests.exceptions import ReadTimeout
-from requests.exceptions import SSLError
-from requests.packages.urllib3.exceptions import ReadTimeoutError
-
-from ..const import API_VERSION_TO_ENGINE_VERSION
-from .utils import call_silently
-from .utils import is_docker_for_mac_installed
-from .utils import is_mac
-from .utils import is_ubuntu
-
-
-log = logging.getLogger(__name__)
-
-
-class UserError(Exception):
-
-    def __init__(self, msg):
-        self.msg = dedent(msg).strip()
-
-    def __unicode__(self):
-        return self.msg
-
-    __str__ = __unicode__
-
-
-class ConnectionError(Exception):
-    pass
-
-
-@contextlib.contextmanager
-def handle_connection_errors(client):
-    try:
-        yield
-    except SSLError as e:
-        log.error('SSL error: %s' % e)
-        raise ConnectionError()
-    except RequestsConnectionError as e:
-        if e.args and isinstance(e.args[0], ReadTimeoutError):
-            log_timeout_error(client.timeout)
-            raise ConnectionError()
-        exit_with_error(get_conn_error_message(client.base_url))
-    except APIError as e:
-        log_api_error(e, client.api_version)
-        raise ConnectionError()
-    except (ReadTimeout, socket.timeout):
-        log_timeout_error(client.timeout)
-        raise ConnectionError()
-
-
-def log_timeout_error(timeout):
-    log.error(
-        "An HTTP request took too long to complete. Retry with --verbose to "
-        "obtain debug information.\n"
-        "If you encounter this issue regularly because of slow network "
-        "conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
-        "value (current value: %s)." % timeout)
-
-
-def log_api_error(e, client_version):
-    if b'client is newer than server' not in e.explanation:
-        log.error(e.explanation)
-        return
-
-    version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
-    if not version:
-        # They've set a custom API version
-        log.error(e.explanation)
-        return
-
-    log.error(
-        "The Docker Engine version is less than the minimum required by "
-        "Compose. Your current project requires a Docker Engine of "
-        "version {version} or greater.".format(version=version))
-
-
-def exit_with_error(msg):
-    log.error(dedent(msg).strip())
-    raise ConnectionError()
-
-
-def get_conn_error_message(url):
-    if call_silently(['which', 'docker']) != 0:
-        if is_mac():
-            return docker_not_found_mac
-        if is_ubuntu():
-            return docker_not_found_ubuntu
-        return docker_not_found_generic
-    if is_docker_for_mac_installed():
-        return conn_error_docker_for_mac
-    if call_silently(['which', 'docker-machine']) == 0:
-        return conn_error_docker_machine
-    return conn_error_generic.format(url=url)
-
-
-docker_not_found_mac = """
-    Couldn't connect to Docker daemon. You might need to install Docker:
-
-    https://docs.docker.com/engine/installation/mac/
-"""
-
-
-docker_not_found_ubuntu = """
-    Couldn't connect to Docker daemon. You might need to install Docker:
-
-    https://docs.docker.com/engine/installation/ubuntulinux/
-"""
-
-
-docker_not_found_generic = """
-    Couldn't connect to Docker daemon. You might need to install Docker:
-
-    https://docs.docker.com/engine/installation/
-"""
-
-
-conn_error_docker_machine = """
-    Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
-"""
-
-conn_error_docker_for_mac = """
-    Couldn't connect to Docker daemon. You might need to start Docker for Mac.
-"""
-
-
-conn_error_generic = """
-    Couldn't connect to Docker daemon at {url} - is it running?
-
-    If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
-"""
diff --git a/env2/lib/python2.7/site-packages/compose/cli/formatter.py b/env2/lib/python2.7/site-packages/compose/cli/formatter.py
deleted file mode 100644
index d0ed0f8..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/formatter.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-import os
-
-import texttable
-
-from compose.cli import colors
-
-
-def get_tty_width():
-    tty_size = os.popen('stty size', 'r').read().split()
-    if len(tty_size) != 2:
-        return 0
-    _, width = tty_size
-    return int(width)
-
-
-class Formatter(object):
-    """Format tabular data for printing."""
-    def table(self, headers, rows):
-        table = texttable.Texttable(max_width=get_tty_width())
-        table.set_cols_dtype(['t' for h in headers])
-        table.add_rows([headers] + rows)
-        table.set_deco(table.HEADER)
-        table.set_chars(['-', '|', '+', '-'])
-
-        return table.draw()
-
-
-class ConsoleWarningFormatter(logging.Formatter):
-    """A logging.Formatter which prints WARNING and ERROR messages with
-    a prefix of the log level colored appropriate for the log level.
-    """
-
-    def get_level_message(self, record):
-        separator = ': '
-        if record.levelno == logging.WARNING:
-            return colors.yellow(record.levelname) + separator
-        if record.levelno == logging.ERROR:
-            return colors.red(record.levelname) + separator
-
-        return ''
-
-    def format(self, record):
-        message = super(ConsoleWarningFormatter, self).format(record)
-        return self.get_level_message(record) + message
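[Editor's note: an illustrative sketch, not part of the diff, of the tabular helper above; the rows are made up.]

    print(Formatter().table(
        ['Name', 'State'],
        [['web_1', 'Up'], ['db_1', 'Exited (0)']]))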
diff --git a/env2/lib/python2.7/site-packages/compose/cli/log_printer.py b/env2/lib/python2.7/site-packages/compose/cli/log_printer.py
deleted file mode 100644
index b48462f..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/log_printer.py
+++ /dev/null
@@ -1,230 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import sys
-from collections import namedtuple
-from itertools import cycle
-from threading import Thread
-
-from six.moves import _thread as thread
-from six.moves.queue import Empty
-from six.moves.queue import Queue
-
-from . import colors
-from compose import utils
-from compose.cli.signals import ShutdownException
-from compose.utils import split_buffer
-
-
-class LogPresenter(object):
-
-    def __init__(self, prefix_width, color_func):
-        self.prefix_width = prefix_width
-        self.color_func = color_func
-
-    def present(self, container, line):
-        prefix = container.name_without_project.ljust(self.prefix_width)
-        return '{prefix} {line}'.format(
-            prefix=self.color_func(prefix + ' |'),
-            line=line)
-
-
-def build_log_presenters(service_names, monochrome):
-    """Return an iterable of functions.
-
-    Each function can be used to format the logs output of a container.
-    """
-    prefix_width = max_name_width(service_names)
-
-    def no_color(text):
-        return text
-
-    for color_func in cycle([no_color] if monochrome else colors.rainbow()):
-        yield LogPresenter(prefix_width, color_func)
-
-
-def max_name_width(service_names, max_index_width=3):
-    """Calculate the maximum width of container names so we can make the log
-    prefixes line up like so:
-
-    db_1  | Listening
-    web_1 | Listening
-    """
-    return max(len(name) for name in service_names) + max_index_width
-
-
-class LogPrinter(object):
-    """Print logs from many containers to a single output stream."""
-
-    def __init__(self,
-                 containers,
-                 presenters,
-                 event_stream,
-                 output=sys.stdout,
-                 cascade_stop=False,
-                 log_args=None):
-        self.containers = containers
-        self.presenters = presenters
-        self.event_stream = event_stream
-        self.output = utils.get_output_stream(output)
-        self.cascade_stop = cascade_stop
-        self.log_args = log_args or {}
-
-    def run(self):
-        if not self.containers:
-            return
-
-        queue = Queue()
-        thread_args = queue, self.log_args
-        thread_map = build_thread_map(self.containers, self.presenters, thread_args)
-        start_producer_thread((
-            thread_map,
-            self.event_stream,
-            self.presenters,
-            thread_args))
-
-        for line in consume_queue(queue, self.cascade_stop):
-            remove_stopped_threads(thread_map)
-
-            if not line:
-                if not thread_map:
-                    # There are no running containers left to tail, so exit
-                    return
-                # We got an empty line because of a timeout, but there are still
-                # active containers to tail, so continue
-                continue
-
-            self.output.write(line)
-            self.output.flush()
-
-
-def remove_stopped_threads(thread_map):
-    for container_id, tailer_thread in list(thread_map.items()):
-        if not tailer_thread.is_alive():
-            thread_map.pop(container_id, None)
-
-
-def build_thread(container, presenter, queue, log_args):
-    tailer = Thread(
-        target=tail_container_logs,
-        args=(container, presenter, queue, log_args))
-    tailer.daemon = True
-    tailer.start()
-    return tailer
-
-
-def build_thread_map(initial_containers, presenters, thread_args):
-    return {
-        container.id: build_thread(container, next(presenters), *thread_args)
-        for container in initial_containers
-    }
-
-
-class QueueItem(namedtuple('_QueueItem', 'item is_stop exc')):
-
-    @classmethod
-    def new(cls, item):
-        return cls(item, None, None)
-
-    @classmethod
-    def exception(cls, exc):
-        return cls(None, None, exc)
-
-    @classmethod
-    def stop(cls):
-        return cls(None, True, None)
-
-
-def tail_container_logs(container, presenter, queue, log_args):
-    generator = get_log_generator(container)
-
-    try:
-        for item in generator(container, log_args):
-            queue.put(QueueItem.new(presenter.present(container, item)))
-    except Exception as e:
-        queue.put(QueueItem.exception(e))
-        return
-
-    if log_args.get('follow'):
-        queue.put(QueueItem.new(presenter.color_func(wait_on_exit(container))))
-    queue.put(QueueItem.stop())
-
-
-def get_log_generator(container):
-    if container.has_api_logs:
-        return build_log_generator
-    return build_no_log_generator
-
-
-def build_no_log_generator(container, log_args):
-    """Return a generator that prints a warning about logs and waits for
-    container to exit.
-    """
-    yield "WARNING: no logs are available with the '{}' log driver\n".format(
-        container.log_driver)
-
-
-def build_log_generator(container, log_args):
-    # if the container doesn't have a log_stream we need to attach to container
-    # before log printer starts running
-    if container.log_stream is None:
-        stream = container.logs(stdout=True, stderr=True, stream=True, **log_args)
-    else:
-        stream = container.log_stream
-
-    return split_buffer(stream)
-
-
-def wait_on_exit(container):
-    exit_code = container.wait()
-    return "%s exited with code %s\n" % (container.name, exit_code)
-
-
-def start_producer_thread(thread_args):
-    producer = Thread(target=watch_events, args=thread_args)
-    producer.daemon = True
-    producer.start()
-
-
-def watch_events(thread_map, event_stream, presenters, thread_args):
-    for event in event_stream:
-        if event['action'] == 'stop':
-            thread_map.pop(event['id'], None)
-
-        if event['action'] != 'start':
-            continue
-
-        if event['id'] in thread_map:
-            if thread_map[event['id']].is_alive():
-                continue
-            # Container was stopped and started, we need a new thread
-            thread_map.pop(event['id'], None)
-
-        thread_map[event['id']] = build_thread(
-            event['container'],
-            next(presenters),
-            *thread_args)
-
-
-def consume_queue(queue, cascade_stop):
-    """Consume the queue by reading lines off of it and yielding them."""
-    while True:
-        try:
-            item = queue.get(timeout=0.1)
-        except Empty:
-            yield None
-            continue
-        # See https://github.com/docker/compose/issues/189
-        except thread.error:
-            raise ShutdownException()
-
-        if item.exc:
-            raise item.exc
-
-        if item.is_stop:
-            if cascade_stop:
-                raise StopIteration
-            else:
-                continue
-
-        yield item.item
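[Editor's note: an illustrative sketch, not part of the diff, of the queue protocol the printer relies on: QueueItem values drive consume_queue(), and a stop item ends iteration when cascade_stop is set.]

    q = Queue()
    q.put(QueueItem.new('web_1 | ready\n'))
    q.put(QueueItem.stop())
    for line in consume_queue(q, cascade_stop=True):
        sys.stdout.write(line)  # one log line, then the stop item ends iteration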
diff --git a/env2/lib/python2.7/site-packages/compose/cli/main.py b/env2/lib/python2.7/site-packages/compose/cli/main.py
deleted file mode 100644
index b487bb7..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/main.py
+++ /dev/null
@@ -1,1046 +0,0 @@
-from __future__ import absolute_import
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import contextlib
-import functools
-import json
-import logging
-import re
-import sys
-from inspect import getdoc
-from operator import attrgetter
-
-from . import errors
-from . import signals
-from .. import __version__
-from ..bundle import get_image_digests
-from ..bundle import MissingDigests
-from ..bundle import serialize_bundle
-from ..config import ConfigurationError
-from ..config import parse_environment
-from ..config.environment import Environment
-from ..config.serialize import serialize_config
-from ..const import DEFAULT_TIMEOUT
-from ..const import IS_WINDOWS_PLATFORM
-from ..progress_stream import StreamOutputError
-from ..project import NoSuchService
-from ..project import OneOffFilter
-from ..project import ProjectError
-from ..service import BuildAction
-from ..service import BuildError
-from ..service import ConvergenceStrategy
-from ..service import ImageType
-from ..service import NeedsBuildError
-from ..service import OperationFailedError
-from .command import get_config_from_options
-from .command import project_from_options
-from .docopt_command import DocoptDispatcher
-from .docopt_command import get_handler
-from .docopt_command import NoSuchCommand
-from .errors import UserError
-from .formatter import ConsoleWarningFormatter
-from .formatter import Formatter
-from .log_printer import build_log_presenters
-from .log_printer import LogPrinter
-from .utils import get_version_info
-from .utils import yesno
-
-
-if not IS_WINDOWS_PLATFORM:
-    from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation
-
-log = logging.getLogger(__name__)
-console_handler = logging.StreamHandler(sys.stderr)
-
-
-def main():
-    command = dispatch()
-
-    try:
-        command()
-    except (KeyboardInterrupt, signals.ShutdownException):
-        log.error("Aborting.")
-        sys.exit(1)
-    except (UserError, NoSuchService, ConfigurationError,
-            ProjectError, OperationFailedError) as e:
-        log.error(e.msg)
-        sys.exit(1)
-    except BuildError as e:
-        log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
-        sys.exit(1)
-    except StreamOutputError as e:
-        log.error(e)
-        sys.exit(1)
-    except NeedsBuildError as e:
-        log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
-        sys.exit(1)
-    except errors.ConnectionError:
-        sys.exit(1)
-
-
-def dispatch():
-    setup_logging()
-    dispatcher = DocoptDispatcher(
-        TopLevelCommand,
-        {'options_first': True, 'version': get_version_info('compose')})
-
-    try:
-        options, handler, command_options = dispatcher.parse(sys.argv[1:])
-    except NoSuchCommand as e:
-        commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
-        log.error("No such command: %s\n\n%s", e.command, commands)
-        sys.exit(1)
-
-    setup_console_handler(console_handler, options.get('--verbose'))
-    return functools.partial(perform_command, options, handler, command_options)
-
-
-def perform_command(options, handler, command_options):
-    if options['COMMAND'] in ('help', 'version'):
-        # Skip looking up the compose file.
-        handler(command_options)
-        return
-
-    if options['COMMAND'] in ('config', 'bundle'):
-        command = TopLevelCommand(None)
-        handler(command, options, command_options)
-        return
-
-    project = project_from_options('.', options)
-    command = TopLevelCommand(project)
-    with errors.handle_connection_errors(project.client):
-        handler(command, command_options)
-
-
-def setup_logging():
-    root_logger = logging.getLogger()
-    root_logger.addHandler(console_handler)
-    root_logger.setLevel(logging.DEBUG)
-
-    # Disable requests logging
-    logging.getLogger("requests").propagate = False
-
-
-def setup_console_handler(handler, verbose):
-    if handler.stream.isatty():
-        format_class = ConsoleWarningFormatter
-    else:
-        format_class = logging.Formatter
-
-    if verbose:
-        handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s'))
-        handler.setLevel(logging.DEBUG)
-    else:
-        handler.setFormatter(format_class())
-        handler.setLevel(logging.INFO)
-
-
-# stolen from docopt master
-def parse_doc_section(name, source):
-    pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
-                         re.IGNORECASE | re.MULTILINE)
-    return [s.strip() for s in pattern.findall(source)]
-
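
parse_doc_section above grabs a named, indented section out of a docstring; docopt uses the same trick to find "Usage:" blocks. A small illustration with a hypothetical docstring (Python 3):

    import re

    def parse_doc_section(name, source):
        # Same regex as above: the line containing `name`, plus any
        # following lines that start with whitespace.
        pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
                             re.IGNORECASE | re.MULTILINE)
        return [s.strip() for s in pattern.findall(source)]

    doc = ('Toy tool.\n'
           '\n'
           'Commands:\n'
           '  build    Build things\n'
           '  run      Run things\n'
           '\n'
           'Options:\n'
           '  -h  Show help\n')
    print(parse_doc_section('commands:', doc))
    # ['Commands:\n  build    Build things\n  run      Run things']
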
-
-class TopLevelCommand(object):
-    """Define and run multi-container applications with Docker.
-
-    Usage:
-      docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...]
-      docker-compose -h|--help
-
-    Options:
-      -f, --file FILE             Specify an alternate compose file (default: docker-compose.yml)
-      -p, --project-name NAME     Specify an alternate project name (default: directory name)
-      --verbose                   Show more output
-      -v, --version               Print version and exit
-      -H, --host HOST             Daemon socket to connect to
-
-      --tls                       Use TLS; implied by --tlsverify
-      --tlscacert CA_PATH         Trust certs signed only by this CA
-      --tlscert CLIENT_CERT_PATH  Path to TLS certificate file
-      --tlskey TLS_KEY_PATH       Path to TLS key file
-      --tlsverify                 Use TLS and verify the remote
-      --skip-hostname-check       Don't check the daemon's hostname against the name specified
-                                  in the client certificate (for example if your docker host
-                                  is an IP address)
-
-    Commands:
-      build              Build or rebuild services
-      bundle             Generate a Docker bundle from the Compose file
-      config             Validate and view the compose file
-      create             Create services
-      down               Stop and remove containers, networks, images, and volumes
-      events             Receive real time events from containers
-      exec               Execute a command in a running container
-      help               Get help on a command
-      kill               Kill containers
-      logs               View output from containers
-      pause              Pause services
-      port               Print the public port for a port binding
-      ps                 List containers
-      pull               Pulls service images
-      push               Push service images
-      restart            Restart services
-      rm                 Remove stopped containers
-      run                Run a one-off command
-      scale              Set number of containers for a service
-      start              Start services
-      stop               Stop services
-      unpause            Unpause services
-      up                 Create and start containers
-      version            Show the Docker-Compose version information
-    """
-
-    def __init__(self, project, project_dir='.'):
-        self.project = project
-        self.project_dir = project_dir
-
-    def build(self, options):
-        """
-        Build or rebuild services.
-
-        Services are built once and then tagged as `project_service`,
-        e.g. `composetest_db`. If you change a service's `Dockerfile` or the
-        contents of its build directory, you can run `docker-compose build` to rebuild it.
-
-        Usage: build [options] [SERVICE...]
-
-        Options:
-            --force-rm  Always remove intermediate containers.
-            --no-cache  Do not use cache when building the image.
-            --pull      Always attempt to pull a newer version of the image.
-        """
-        self.project.build(
-            service_names=options['SERVICE'],
-            no_cache=bool(options.get('--no-cache', False)),
-            pull=bool(options.get('--pull', False)),
-            force_rm=bool(options.get('--force-rm', False)))
-
-    def bundle(self, config_options, options):
-        """
-        Generate a Distributed Application Bundle (DAB) from the Compose file.
-
-        Images must have digests stored, which requires interaction with a
-        Docker registry. If digests aren't stored for all images, you can fetch
-        them with `docker-compose pull` or `docker-compose push`. To push images
-        automatically when bundling, pass `--push-images`. Only services with
-        a `build` option specified will have their images pushed.
-
-        Usage: bundle [options]
-
-        Options:
-            --push-images              Automatically push images for any services
-                                       which have a `build` option specified.
-
-            -o, --output PATH          Path to write the bundle file to.
-                                       Defaults to "<project name>.dab".
-        """
-        self.project = project_from_options('.', config_options)
-        compose_config = get_config_from_options(self.project_dir, config_options)
-
-        output = options["--output"]
-        if not output:
-            output = "{}.dab".format(self.project.name)
-
-        with errors.handle_connection_errors(self.project.client):
-            try:
-                image_digests = get_image_digests(
-                    self.project,
-                    allow_push=options['--push-images'],
-                )
-            except MissingDigests as e:
-                def list_images(images):
-                    return "\n".join("    {}".format(name) for name in sorted(images))
-
-                paras = ["Some images are missing digests."]
-
-                if e.needs_push:
-                    command_hint = (
-                        "Use `docker-compose push {}` to push them. "
-                        "You can do this automatically with `docker-compose bundle --push-images`."
-                        .format(" ".join(sorted(e.needs_push)))
-                    )
-                    paras += [
-                        "The following images can be pushed:",
-                        list_images(e.needs_push),
-                        command_hint,
-                    ]
-
-                if e.needs_pull:
-                    command_hint = (
-                        "Use `docker-compose pull {}` to pull them. "
-                        .format(" ".join(sorted(e.needs_pull)))
-                    )
-
-                    paras += [
-                        "The following images need to be pulled:",
-                        list_images(e.needs_pull),
-                        command_hint,
-                    ]
-
-                raise UserError("\n\n".join(paras))
-
-        with open(output, 'w') as f:
-            f.write(serialize_bundle(compose_config, image_digests))
-
-        log.info("Wrote bundle to {}".format(output))
-
-    def config(self, config_options, options):
-        """
-        Validate and view the compose file.
-
-        Usage: config [options]
-
-        Options:
-            -q, --quiet     Only validate the configuration, don't print
-                            anything.
-            --services      Print the service names, one per line.
-
-        """
-        compose_config = get_config_from_options(self.project_dir, config_options)
-
-        if options['--quiet']:
-            return
-
-        if options['--services']:
-            print('\n'.join(service['name'] for service in compose_config.services))
-            return
-
-        print(serialize_config(compose_config))
-
-    def create(self, options):
-        """
-        Creates containers for a service.
-
-        Usage: create [options] [SERVICE...]
-
-        Options:
-            --force-recreate       Recreate containers even if their configuration and
-                                   image haven't changed. Incompatible with --no-recreate.
-            --no-recreate          If containers already exist, don't recreate them.
-                                   Incompatible with --force-recreate.
-            --no-build             Don't build an image, even if it's missing.
-            --build                Build images before creating containers.
-        """
-        service_names = options['SERVICE']
-
-        self.project.create(
-            service_names=service_names,
-            strategy=convergence_strategy_from_opts(options),
-            do_build=build_action_from_opts(options),
-        )
-
-    def down(self, options):
-        """
-        Stops containers and removes containers, networks, volumes, and images
-        created by `up`.
-
-        By default, the only things removed are:
-
-        - Containers for services defined in the Compose file
-        - Networks defined in the `networks` section of the Compose file
-        - The default network, if one is used
-
-        Networks and volumes defined as `external` are never removed.
-
-        Usage: down [options]
-
-        Options:
-            --rmi type          Remove images. Type must be one of:
-                                'all': Remove all images used by any service.
-                                'local': Remove only images that don't have a custom tag
-                                set by the `image` field.
-            -v, --volumes       Remove named volumes declared in the `volumes` section
-                                of the Compose file and anonymous volumes
-                                attached to containers.
-            --remove-orphans    Remove containers for services not defined in the
-                                Compose file
-        """
-        image_type = image_type_from_opt('--rmi', options['--rmi'])
-        self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
-
-    def events(self, options):
-        """
-        Receive real time events from containers.
-
-        Usage: events [options] [SERVICE...]
-
-        Options:
-            --json      Output events as a stream of json objects
-        """
-        def format_event(event):
-            attributes = ["%s=%s" % item for item in event['attributes'].items()]
-            return ("{time} {type} {action} {id} ({attrs})").format(
-                attrs=", ".join(sorted(attributes)),
-                **event)
-
-        def json_format_event(event):
-            event['time'] = event['time'].isoformat()
-            event.pop('container')
-            return json.dumps(event)
-
-        for event in self.project.events():
-            formatter = json_format_event if options['--json'] else format_event
-            print(formatter(event))
-            sys.stdout.flush()
-
-    def exec_command(self, options):
-        """
-        Execute a command in a running container
-
-        Usage: exec [options] SERVICE COMMAND [ARGS...]
-
-        Options:
-            -d                Detached mode: Run command in the background.
-            --privileged      Give extended privileges to the process.
-            --user USER       Run the command as this user.
-            -T                Disable pseudo-tty allocation. By default `docker-compose exec`
-                              allocates a TTY.
-            --index=index     index of the container if there are multiple
-                              instances of a service [default: 1]
-        """
-        index = int(options.get('--index'))
-        service = self.project.get_service(options['SERVICE'])
-        detach = options['-d']
-
-        if IS_WINDOWS_PLATFORM and not detach:
-            raise UserError(
-                "Interactive mode is not yet supported on Windows.\n"
-                "Please pass the -d flag when using `docker-compose exec`."
-            )
-        try:
-            container = service.get_container(number=index)
-        except ValueError as e:
-            raise UserError(str(e))
-        command = [options['COMMAND']] + options['ARGS']
-        tty = not options["-T"]
-
-        create_exec_options = {
-            "privileged": options["--privileged"],
-            "user": options["--user"],
-            "tty": tty,
-            "stdin": tty,
-        }
-
-        exec_id = container.create_exec(command, **create_exec_options)
-
-        if detach:
-            container.start_exec(exec_id, tty=tty)
-            return
-
-        signals.set_signal_handler_to_shutdown()
-        try:
-            operation = ExecOperation(
-                self.project.client,
-                exec_id,
-                interactive=tty,
-            )
-            pty = PseudoTerminal(self.project.client, operation)
-            pty.start()
-        except signals.ShutdownException:
-            log.info("received shutdown exception: closing")
-        exit_code = self.project.client.exec_inspect(exec_id).get("ExitCode")
-        sys.exit(exit_code)
-
-    @classmethod
-    def help(cls, options):
-        """
-        Get help on a command.
-
-        Usage: help [COMMAND]
-        """
-        if options['COMMAND']:
-            subject = get_handler(cls, options['COMMAND'])
-        else:
-            subject = cls
-
-        print(getdoc(subject))
-
-    def kill(self, options):
-        """
-        Force stop service containers.
-
-        Usage: kill [options] [SERVICE...]
-
-        Options:
-            -s SIGNAL         SIGNAL to send to the container.
-                              Default signal is SIGKILL.
-        """
-        signal = options.get('-s', 'SIGKILL')
-
-        self.project.kill(service_names=options['SERVICE'], signal=signal)
-
-    def logs(self, options):
-        """
-        View output from containers.
-
-        Usage: logs [options] [SERVICE...]
-
-        Options:
-            --no-color          Produce monochrome output.
-            -f, --follow        Follow log output.
-            -t, --timestamps    Show timestamps.
-            --tail="all"        Number of lines to show from the end of the logs
-                                for each container.
-        """
-        containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
-
-        tail = options['--tail']
-        if tail is not None:
-            if tail.isdigit():
-                tail = int(tail)
-            elif tail != 'all':
-                raise UserError("tail flag must be all or a number")
-        log_args = {
-            'follow': options['--follow'],
-            'tail': tail,
-            'timestamps': options['--timestamps']
-        }
-        print("Attaching to", list_containers(containers))
-        log_printer_from_project(
-            self.project,
-            containers,
-            options['--no-color'],
-            log_args,
-            event_stream=self.project.events(service_names=options['SERVICE'])).run()
-
-    def pause(self, options):
-        """
-        Pause services.
-
-        Usage: pause [SERVICE...]
-        """
-        containers = self.project.pause(service_names=options['SERVICE'])
-        exit_if(not containers, 'No containers to pause', 1)
-
-    def port(self, options):
-        """
-        Print the public port for a port binding.
-
-        Usage: port [options] SERVICE PRIVATE_PORT
-
-        Options:
-            --protocol=proto  tcp or udp [default: tcp]
-            --index=index     index of the container if there are multiple
-                              instances of a service [default: 1]
-        """
-        index = int(options.get('--index'))
-        service = self.project.get_service(options['SERVICE'])
-        try:
-            container = service.get_container(number=index)
-        except ValueError as e:
-            raise UserError(str(e))
-        print(container.get_local_port(
-            options['PRIVATE_PORT'],
-            protocol=options.get('--protocol') or 'tcp') or '')
-
-    def ps(self, options):
-        """
-        List containers.
-
-        Usage: ps [options] [SERVICE...]
-
-        Options:
-            -q    Only display IDs
-        """
-        containers = sorted(
-            self.project.containers(service_names=options['SERVICE'], stopped=True) +
-            self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
-            key=attrgetter('name'))
-
-        if options['-q']:
-            for container in containers:
-                print(container.id)
-        else:
-            headers = [
-                'Name',
-                'Command',
-                'State',
-                'Ports',
-            ]
-            rows = []
-            for container in containers:
-                command = container.human_readable_command
-                if len(command) > 30:
-                    command = '%s ...' % command[:26]
-                rows.append([
-                    container.name,
-                    command,
-                    container.human_readable_state,
-                    container.human_readable_ports,
-                ])
-            print(Formatter().table(headers, rows))
-
-    def pull(self, options):
-        """
-        Pulls images for services.
-
-        Usage: pull [options] [SERVICE...]
-
-        Options:
-            --ignore-pull-failures  Pulls what it can and ignores images with pull failures.
-        """
-        self.project.pull(
-            service_names=options['SERVICE'],
-            ignore_pull_failures=options.get('--ignore-pull-failures')
-        )
-
-    def push(self, options):
-        """
-        Pushes images for services.
-
-        Usage: push [options] [SERVICE...]
-
-        Options:
-            --ignore-push-failures  Pushes what it can and ignores images with push failures.
-        """
-        self.project.push(
-            service_names=options['SERVICE'],
-            ignore_push_failures=options.get('--ignore-push-failures')
-        )
-
-    def rm(self, options):
-        """
-        Removes stopped service containers.
-
-        By default, anonymous volumes attached to containers will not be removed. You
-        can override this with `-v`. To list all volumes, use `docker volume ls`.
-
-        Any data which is not in a volume will be lost.
-
-        Usage: rm [options] [SERVICE...]
-
-        Options:
-            -f, --force   Don't ask to confirm removal
-            -v            Remove any anonymous volumes attached to containers
-            -a, --all     Obsolete. Also remove one-off containers created by
-                          docker-compose run
-        """
-        if options.get('--all'):
-            log.warn(
-                '--all flag is obsolete. This is now the default behavior '
-                'of `docker-compose rm`'
-            )
-        one_off = OneOffFilter.include
-
-        all_containers = self.project.containers(
-            service_names=options['SERVICE'], stopped=True, one_off=one_off
-        )
-        stopped_containers = [c for c in all_containers if not c.is_running]
-
-        if len(stopped_containers) > 0:
-            print("Going to remove", list_containers(stopped_containers))
-            if options.get('--force') \
-                    or yesno("Are you sure? [yN] ", default=False):
-                self.project.remove_stopped(
-                    service_names=options['SERVICE'],
-                    v=options.get('-v', False),
-                    one_off=one_off
-                )
-        else:
-            print("No stopped containers")
-
-    def run(self, options):
-        """
-        Run a one-off command on a service.
-
-        For example:
-
-            $ docker-compose run web python manage.py shell
-
-        By default, linked services will be started, unless they are already
-        running. If you do not want to start linked services, use
-        `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
-
-        Usage: run [options] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
-
-        Options:
-            -d                    Detached mode: Run container in the background, print
-                                  new container name.
-            --name NAME           Assign a name to the container
-            --entrypoint CMD      Override the entrypoint of the image.
-            -e KEY=VAL            Set an environment variable (can be used multiple times)
-            -u, --user=""         Run as specified username or uid
-            --no-deps             Don't start linked services.
-            --rm                  Remove container after run. Ignored in detached mode.
-            -p, --publish=[]      Publish a container's port(s) to the host
-            --service-ports       Run command with the service's ports enabled and mapped
-                                  to the host.
-            -T                    Disable pseudo-tty allocation. By default `docker-compose run`
-                                  allocates a TTY.
-            -w, --workdir=""      Working directory inside the container
-        """
-        service = self.project.get_service(options['SERVICE'])
-        detach = options['-d']
-
-        if IS_WINDOWS_PLATFORM and not detach:
-            raise UserError(
-                "Interactive mode is not yet supported on Windows.\n"
-                "Please pass the -d flag when using `docker-compose run`."
-            )
-
-        if options['--publish'] and options['--service-ports']:
-            raise UserError(
-                'Service port mapping and manual port mapping '
-                'cannot be used together'
-            )
-
-        if options['COMMAND'] is not None:
-            command = [options['COMMAND']] + options['ARGS']
-        elif options['--entrypoint'] is not None:
-            command = []
-        else:
-            command = service.options.get('command')
-
-        container_options = build_container_options(options, detach, command)
-        run_one_off_container(container_options, self.project, service, options)
-
-    def scale(self, options):
-        """
-        Set number of containers to run for a service.
-
-        Numbers are specified in the form `service=num` as arguments.
-        For example:
-
-            $ docker-compose scale web=2 worker=3
-
-        Usage: scale [options] [SERVICE=NUM...]
-
-        Options:
-          -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
-                                     (default: 10)
-        """
-        timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
-
-        for s in options['SERVICE=NUM']:
-            if '=' not in s:
-                raise UserError('Arguments to scale should be in the form service=num')
-            service_name, num = s.split('=', 1)
-            try:
-                num = int(num)
-            except ValueError:
-                raise UserError('Number of containers for service "%s" is not a '
-                                'number' % service_name)
-            self.project.get_service(service_name).scale(num, timeout=timeout)
-
-    def start(self, options):
-        """
-        Start existing containers.
-
-        Usage: start [SERVICE...]
-        """
-        containers = self.project.start(service_names=options['SERVICE'])
-        exit_if(not containers, 'No containers to start', 1)
-
-    def stop(self, options):
-        """
-        Stop running containers without removing them.
-
-        They can be started again with `docker-compose start`.
-
-        Usage: stop [options] [SERVICE...]
-
-        Options:
-          -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
-                                     (default: 10)
-        """
-        timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
-        self.project.stop(service_names=options['SERVICE'], timeout=timeout)
-
-    def restart(self, options):
-        """
-        Restart running containers.
-
-        Usage: restart [options] [SERVICE...]
-
-        Options:
-          -t, --timeout TIMEOUT      Specify a shutdown timeout in seconds.
-                                     (default: 10)
-        """
-        timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
-        containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout)
-        exit_if(not containers, 'No containers to restart', 1)
-
-    def unpause(self, options):
-        """
-        Unpause services.
-
-        Usage: unpause [SERVICE...]
-        """
-        containers = self.project.unpause(service_names=options['SERVICE'])
-        exit_if(not containers, 'No containers to unpause', 1)
-
-    def up(self, options):
-        """
-        Builds, (re)creates, starts, and attaches to containers for a service.
-
-        Unless they are already running, this command also starts any linked services.
-
-        The `docker-compose up` command aggregates the output of each container. When
-        the command exits, all containers are stopped. Running `docker-compose up -d`
-        starts the containers in the background and leaves them running.
-
-        If there are existing containers for a service, and the service's configuration
-        or image was changed after the container's creation, `docker-compose up` picks
-        up the changes by stopping and recreating the containers (preserving mounted
-        volumes). To prevent Compose from picking up changes, use the `--no-recreate`
-        flag.
-
-        If you want to force Compose to stop and recreate all containers, use the
-        `--force-recreate` flag.
-
-        Usage: up [options] [SERVICE...]
-
-        Options:
-            -d                         Detached mode: Run containers in the background,
-                                       print new container names.
-                                       Incompatible with --abort-on-container-exit.
-            --no-color                 Produce monochrome output.
-            --no-deps                  Don't start linked services.
-            --force-recreate           Recreate containers even if their configuration
-                                       and image haven't changed.
-                                       Incompatible with --no-recreate.
-            --no-recreate              If containers already exist, don't recreate them.
-                                       Incompatible with --force-recreate.
-            --no-build                 Don't build an image, even if it's missing.
-            --build                    Build images before starting containers.
-            --abort-on-container-exit  Stops all containers if any container was stopped.
-                                       Incompatible with -d.
-            -t, --timeout TIMEOUT      Use this timeout in seconds for container shutdown
-                                       when attached or when containers are already
-                                       running. (default: 10)
-            --remove-orphans           Remove containers for services not
-                                       defined in the Compose file
-        """
-        start_deps = not options['--no-deps']
-        cascade_stop = options['--abort-on-container-exit']
-        service_names = options['SERVICE']
-        timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
-        remove_orphans = options['--remove-orphans']
-        detached = options.get('-d')
-
-        if detached and cascade_stop:
-            raise UserError("--abort-on-container-exit and -d cannot be combined.")
-
-        with up_shutdown_context(self.project, service_names, timeout, detached):
-            to_attach = self.project.up(
-                service_names=service_names,
-                start_deps=start_deps,
-                strategy=convergence_strategy_from_opts(options),
-                do_build=build_action_from_opts(options),
-                timeout=timeout,
-                detached=detached,
-                remove_orphans=remove_orphans)
-
-            if detached:
-                return
-
-            log_printer = log_printer_from_project(
-                self.project,
-                filter_containers_to_service_names(to_attach, service_names),
-                options['--no-color'],
-                {'follow': True},
-                cascade_stop,
-                event_stream=self.project.events(service_names=service_names))
-            print("Attaching to", list_containers(log_printer.containers))
-            log_printer.run()
-
-            if cascade_stop:
-                print("Aborting on container exit...")
-                self.project.stop(service_names=service_names, timeout=timeout)
-
-    @classmethod
-    def version(cls, options):
-        """
-        Show version information
-
-        Usage: version [--short]
-
-        Options:
-            --short     Shows only Compose's version number.
-        """
-        if options['--short']:
-            print(__version__)
-        else:
-            print(get_version_info('full'))
-
-
-def convergence_strategy_from_opts(options):
-    no_recreate = options['--no-recreate']
-    force_recreate = options['--force-recreate']
-    if force_recreate and no_recreate:
-        raise UserError("--force-recreate and --no-recreate cannot be combined.")
-
-    if force_recreate:
-        return ConvergenceStrategy.always
-
-    if no_recreate:
-        return ConvergenceStrategy.never
-
-    return ConvergenceStrategy.changed
-
-
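
convergence_strategy_from_opts above collapses two mutually exclusive docopt flags into a tri-state enum. The same mapping, restated as a self-contained check (Strategy stands in for compose.service.ConvergenceStrategy):

    from enum import Enum

    class Strategy(Enum):
        always = 1    # --force-recreate
        never = 2     # --no-recreate
        changed = 3   # default: recreate only when config or image changed

    def strategy_from_opts(options):
        force, no = options['--force-recreate'], options['--no-recreate']
        if force and no:
            raise ValueError("--force-recreate and --no-recreate cannot be combined.")
        return Strategy.always if force else Strategy.never if no else Strategy.changed

    assert strategy_from_opts({'--force-recreate': True, '--no-recreate': False}) is Strategy.always
    assert strategy_from_opts({'--force-recreate': False, '--no-recreate': False}) is Strategy.changed
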
-def image_type_from_opt(flag, value):
-    if not value:
-        return ImageType.none
-    try:
-        return ImageType[value]
-    except KeyError:
-        raise UserError("%s flag must be one of: all, local" % flag)
-
-
-def build_action_from_opts(options):
-    if options['--build'] and options['--no-build']:
-        raise UserError("--build and --no-build can not be combined.")
-
-    if options['--build']:
-        return BuildAction.force
-
-    if options['--no-build']:
-        return BuildAction.skip
-
-    return BuildAction.none
-
-
-def build_container_options(options, detach, command):
-    container_options = {
-        'command': command,
-        'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
-        'stdin_open': not detach,
-        'detach': detach,
-    }
-
-    if options['-e']:
-        container_options['environment'] = Environment.from_command_line(
-            parse_environment(options['-e'])
-        )
-
-    if options['--entrypoint']:
-        container_options['entrypoint'] = options.get('--entrypoint')
-
-    if options['--rm']:
-        container_options['restart'] = None
-
-    if options['--user']:
-        container_options['user'] = options.get('--user')
-
-    if not options['--service-ports']:
-        container_options['ports'] = []
-
-    if options['--publish']:
-        container_options['ports'] = options.get('--publish')
-
-    if options['--name']:
-        container_options['name'] = options['--name']
-
-    if options['--workdir']:
-        container_options['working_dir'] = options['--workdir']
-
-    return container_options
-
-
-def run_one_off_container(container_options, project, service, options):
-    if not options['--no-deps']:
-        deps = service.get_dependency_names()
-        if deps:
-            project.up(
-                service_names=deps,
-                start_deps=True,
-                strategy=ConvergenceStrategy.never)
-
-    project.initialize()
-
-    container = service.create_container(
-        quiet=True,
-        one_off=True,
-        **container_options)
-
-    if options['-d']:
-        service.start_container(container)
-        print(container.name)
-        return
-
-    def remove_container(force=False):
-        if options['--rm']:
-            project.client.remove_container(container.id, force=force)
-
-    signals.set_signal_handler_to_shutdown()
-    try:
-        try:
-            operation = RunOperation(
-                project.client,
-                container.id,
-                interactive=not options['-T'],
-                logs=False,
-            )
-            pty = PseudoTerminal(project.client, operation)
-            sockets = pty.sockets()
-            service.start_container(container)
-            pty.start(sockets)
-            exit_code = container.wait()
-        except signals.ShutdownException:
-            project.client.stop(container.id)
-            exit_code = 1
-    except signals.ShutdownException:
-        project.client.kill(container.id)
-        remove_container(force=True)
-        sys.exit(2)
-
-    remove_container()
-    sys.exit(exit_code)
-
-
-def log_printer_from_project(
-    project,
-    containers,
-    monochrome,
-    log_args,
-    cascade_stop=False,
-    event_stream=None,
-):
-    return LogPrinter(
-        containers,
-        build_log_presenters(project.service_names, monochrome),
-        event_stream or project.events(),
-        cascade_stop=cascade_stop,
-        log_args=log_args)
-
-
-def filter_containers_to_service_names(containers, service_names):
-    if not service_names:
-        return containers
-
-    return [
-        container
-        for container in containers if container.service in service_names
-    ]
-
-
-@contextlib.contextmanager
-def up_shutdown_context(project, service_names, timeout, detached):
-    if detached:
-        yield
-        return
-
-    signals.set_signal_handler_to_shutdown()
-    try:
-        try:
-            yield
-        except signals.ShutdownException:
-            print("Gracefully stopping... (press Ctrl+C again to force)")
-            project.stop(service_names=service_names, timeout=timeout)
-    except signals.ShutdownException:
-        project.kill(service_names=service_names)
-        sys.exit(2)
-
-
-def list_containers(containers):
-    return ", ".join(c.name for c in containers)
-
-
-def exit_if(condition, message, exit_code):
-    if condition:
-        log.error(message)
-        raise SystemExit(exit_code)
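
A note on the dispatch style in main.py above: docopt treats each docstring as the parser spec, so TopLevelCommand's class docstring routes the top-level argv and every handler method's docstring parses that subcommand's own argv; `help COMMAND` simply prints the handler's docstring. A stripped-down sketch of the routing half (no docopt dependency; names are illustrative):

    from inspect import getdoc

    class Commands(object):
        """Toy dispatcher: each method's docstring doubles as its help text."""

        def ps(self, options):
            """List containers.

            Usage: ps [options]
            """
            print('ps called with', options)

    def get_handler(cls, command):
        command = command.replace('-', '_')
        if not hasattr(cls, command):
            raise SystemExit('No such command: %s' % command)
        return getattr(cls, command)

    handler = get_handler(Commands, 'ps')
    print(getdoc(handler))              # what `help ps` would show
    handler(Commands(), {'-q': False})
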
diff --git a/env2/lib/python2.7/site-packages/compose/cli/signals.py b/env2/lib/python2.7/site-packages/compose/cli/signals.py
deleted file mode 100644
index 68a0598..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/signals.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import signal
-
-
-class ShutdownException(Exception):
-    pass
-
-
-def shutdown(signal, frame):
-    raise ShutdownException()
-
-
-def set_signal_handler(handler):
-    signal.signal(signal.SIGINT, handler)
-    signal.signal(signal.SIGTERM, handler)
-
-
-def set_signal_handler_to_shutdown():
-    set_signal_handler(shutdown)
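
The signals module above exists so that SIGINT/SIGTERM become an ordinary exception, letting cleanup live in plain try/except blocks further up the stack (see up_shutdown_context and run_one_off_container). A runnable sketch of the same idea on POSIX:

    import os
    import signal

    class ShutdownException(Exception):
        pass

    def _shutdown(signum, frame):
        raise ShutdownException()

    signal.signal(signal.SIGTERM, _shutdown)

    try:
        os.kill(os.getpid(), signal.SIGTERM)   # simulate an external `kill`
    except ShutdownException:
        print('cleaning up, then exiting')     # e.g. project.stop(...) in compose
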
diff --git a/env2/lib/python2.7/site-packages/compose/cli/utils.py b/env2/lib/python2.7/site-packages/compose/cli/utils.py
deleted file mode 100644
index f60f61c..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/utils.py
+++ /dev/null
@@ -1,124 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import unicode_literals
-
-import os
-import platform
-import ssl
-import subprocess
-import sys
-
-import docker
-
-import compose
-
-# WindowsError is not defined on non-win32 platforms. Avoid runtime errors by
-# defining it as OSError (its parent class) if missing.
-try:
-    WindowsError
-except NameError:
-    WindowsError = OSError
-
-
-def yesno(prompt, default=None):
-    """
-    Prompt the user for a yes or no.
-
-    Can optionally specify a default value, which will only be
-    used if they enter a blank line.
-
-    Unrecognised input (anything other than "y", "n", "yes",
-    "no" or "") will return None.
-    """
-    answer = input(prompt).strip().lower()
-
-    if answer == "y" or answer == "yes":
-        return True
-    elif answer == "n" or answer == "no":
-        return False
-    elif answer == "":
-        return default
-    else:
-        return None
-
-
-def input(prompt):
-    """
-    Version of input (raw_input in Python 2) which forces a flush of sys.stdout
-    to avoid problems where the prompt fails to appear due to line buffering
-    """
-    sys.stdout.write(prompt)
-    sys.stdout.flush()
-    return sys.stdin.readline().rstrip('\n')
-
-
-def call_silently(*args, **kwargs):
-    """
-    Like subprocess.call(), but redirects stdout and stderr to /dev/null.
-    """
-    with open(os.devnull, 'w') as shutup:
-        try:
-            return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs)
-        except WindowsError:
-            # On Windows, subprocess.call() can still raise exceptions. Normalize
-            # to POSIXy behaviour by returning a nonzero exit code.
-            return 1
-
-
-def is_mac():
-    return platform.system() == 'Darwin'
-
-
-def is_ubuntu():
-    return platform.system() == 'Linux' and platform.linux_distribution()[0] == 'Ubuntu'
-
-
-def get_version_info(scope):
-    versioninfo = 'docker-compose version {}, build {}'.format(
-        compose.__version__,
-        get_build_version())
-
-    if scope == 'compose':
-        return versioninfo
-    if scope == 'full':
-        return (
-            "{}\n"
-            "docker-py version: {}\n"
-            "{} version: {}\n"
-            "OpenSSL version: {}"
-        ).format(
-            versioninfo,
-            docker.version,
-            platform.python_implementation(),
-            platform.python_version(),
-            ssl.OPENSSL_VERSION)
-
-    raise ValueError("{} is not a valid version scope".format(scope))
-
-
-def get_build_version():
-    filename = os.path.join(os.path.dirname(compose.__file__), 'GITSHA')
-    if not os.path.exists(filename):
-        return 'unknown'
-
-    with open(filename) as fh:
-        return fh.read().strip()
-
-
-def is_docker_for_mac_installed():
-    return is_mac() and os.path.isdir('/Applications/Docker.app')
-
-
-def generate_user_agent():
-    parts = [
-        "docker-compose/{}".format(compose.__version__),
-        "docker-py/{}".format(docker.__version__),
-    ]
-    try:
-        p_system = platform.system()
-        p_release = platform.release()
-    except IOError:
-        pass
-    else:
-        parts.append("{}/{}".format(p_system, p_release))
-    return " ".join(parts)
diff --git a/env2/lib/python2.7/site-packages/compose/cli/verbose_proxy.py b/env2/lib/python2.7/site-packages/compose/cli/verbose_proxy.py
deleted file mode 100644
index b1592ea..0000000
--- a/env2/lib/python2.7/site-packages/compose/cli/verbose_proxy.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import functools
-import logging
-import pprint
-from itertools import chain
-
-import six
-
-
-def format_call(args, kwargs):
-    args = (repr(a) for a in args)
-    kwargs = ("{0!s}={1!r}".format(*item) for item in six.iteritems(kwargs))
-    return "({0})".format(", ".join(chain(args, kwargs)))
-
-
-def format_return(result, max_lines):
-    if isinstance(result, (list, tuple, set)):
-        return "({0} with {1} items)".format(type(result).__name__, len(result))
-
-    if result:
-        lines = pprint.pformat(result).split('\n')
-        extra = '\n...' if len(lines) > max_lines else ''
-        return '\n'.join(lines[:max_lines]) + extra
-
-    return result
-
-
-class VerboseProxy(object):
-    """Proxy all function calls to another class and log method name, arguments
-    and return values for each call.
-    """
-
-    def __init__(self, obj_name, obj, log_name=None, max_lines=10):
-        self.obj_name = obj_name
-        self.obj = obj
-        self.max_lines = max_lines
-        self.log = logging.getLogger(log_name or __name__)
-
-    def __getattr__(self, name):
-        attr = getattr(self.obj, name)
-
-        if not six.callable(attr):
-            return attr
-
-        return functools.partial(self.proxy_callable, name)
-
-    def proxy_callable(self, call_name, *args, **kwargs):
-        self.log.info("%s %s <- %s",
-                      self.obj_name,
-                      call_name,
-                      format_call(args, kwargs))
-
-        result = getattr(self.obj, call_name)(*args, **kwargs)
-        self.log.info("%s %s -> %s",
-                      self.obj_name,
-                      call_name,
-                      format_return(result, self.max_lines))
-        return result
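
VerboseProxy above is what compose's --verbose mode wraps the Docker API client in, so that every method call and its return value get logged. A minimal self-contained cousin of that class, exercised against a stand-in client:

    import functools
    import logging

    logging.basicConfig(level=logging.INFO, format='%(message)s')
    log = logging.getLogger('proxy')

    class LoggingProxy(object):
        def __init__(self, name, obj):
            self._name, self._obj = name, obj

        def __getattr__(self, attr):
            target = getattr(self._obj, attr)
            if not callable(target):
                return target

            @functools.wraps(target)
            def wrapper(*args, **kwargs):
                log.info('%s.%s <- args=%r kwargs=%r', self._name, attr, args, kwargs)
                result = target(*args, **kwargs)
                log.info('%s.%s -> %r', self._name, attr, result)
                return result
            return wrapper

    class FakeClient(object):          # stand-in for the real docker client
        def containers(self, all=False):
            return ['web_1', 'db_1']

    LoggingProxy('docker', FakeClient()).containers(all=True)
    # docker.containers <- args=() kwargs={'all': True}
    # docker.containers -> ['web_1', 'db_1']
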
diff --git a/env2/lib/python2.7/site-packages/compose/config/__init__.py b/env2/lib/python2.7/site-packages/compose/config/__init__.py
deleted file mode 100644
index 7cf71eb..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# flake8: noqa
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from . import environment
-from .config import ConfigurationError
-from .config import DOCKER_CONFIG_KEYS
-from .config import find
-from .config import load
-from .config import merge_environment
-from .config import parse_environment
diff --git a/env2/lib/python2.7/site-packages/compose/config/config.py b/env2/lib/python2.7/site-packages/compose/config/config.py
deleted file mode 100644
index 7a2b3d3..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/config.py
+++ /dev/null
@@ -1,997 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import functools
-import logging
-import ntpath
-import os
-import string
-import sys
-from collections import namedtuple
-
-import six
-import yaml
-from cached_property import cached_property
-
-from ..const import COMPOSEFILE_V1 as V1
-from ..const import COMPOSEFILE_V2_0 as V2_0
-from ..utils import build_string_dict
-from .environment import env_vars_from_file
-from .environment import Environment
-from .environment import split_env
-from .errors import CircularReference
-from .errors import ComposeFileNotFound
-from .errors import ConfigurationError
-from .errors import VERSION_EXPLANATION
-from .interpolation import interpolate_environment_variables
-from .sort_services import get_container_name_from_network_mode
-from .sort_services import get_service_name_from_network_mode
-from .sort_services import sort_service_dicts
-from .types import parse_extra_hosts
-from .types import parse_restart_spec
-from .types import ServiceLink
-from .types import VolumeFromSpec
-from .types import VolumeSpec
-from .validation import match_named_volumes
-from .validation import validate_against_config_schema
-from .validation import validate_config_section
-from .validation import validate_depends_on
-from .validation import validate_extends_file_path
-from .validation import validate_links
-from .validation import validate_network_mode
-from .validation import validate_service_constraints
-from .validation import validate_top_level_object
-from .validation import validate_ulimits
-
-
-DOCKER_CONFIG_KEYS = [
-    'cap_add',
-    'cap_drop',
-    'cgroup_parent',
-    'command',
-    'cpu_quota',
-    'cpu_shares',
-    'cpuset',
-    'detach',
-    'devices',
-    'dns',
-    'dns_search',
-    'domainname',
-    'entrypoint',
-    'env_file',
-    'environment',
-    'extra_hosts',
-    'hostname',
-    'image',
-    'ipc',
-    'labels',
-    'links',
-    'mac_address',
-    'mem_limit',
-    'memswap_limit',
-    'net',
-    'pid',
-    'ports',
-    'privileged',
-    'read_only',
-    'restart',
-    'security_opt',
-    'shm_size',
-    'stdin_open',
-    'stop_signal',
-    'tty',
-    'user',
-    'volume_driver',
-    'volumes',
-    'volumes_from',
-    'working_dir',
-]
-
-ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
-    'build',
-    'container_name',
-    'dockerfile',
-    'log_driver',
-    'log_opt',
-    'logging',
-    'network_mode',
-]
-
-DOCKER_VALID_URL_PREFIXES = (
-    'http://',
-    'https://',
-    'git://',
-    'github.com/',
-    'git@',
-)
-
-SUPPORTED_FILENAMES = [
-    'docker-compose.yml',
-    'docker-compose.yaml',
-]
-
-DEFAULT_OVERRIDE_FILENAME = 'docker-compose.override.yml'
-
-
-log = logging.getLogger(__name__)
-
-
-class ConfigDetails(namedtuple('_ConfigDetails', 'working_dir config_files environment')):
-    """
-    :param working_dir: the directory to use for relative paths in the config
-    :type  working_dir: string
-    :param config_files: list of configuration files to load
-    :type  config_files: list of :class:`ConfigFile`
-    :param environment: computed environment values for this project
-    :type  environment: :class:`environment.Environment`
-     """
-    def __new__(cls, working_dir, config_files, environment=None):
-        if environment is None:
-            environment = Environment.from_env_file(working_dir)
-        return super(ConfigDetails, cls).__new__(
-            cls, working_dir, config_files, environment
-        )
-
-
-class ConfigFile(namedtuple('_ConfigFile', 'filename config')):
-    """
-    :param filename: filename of the config file
-    :type  filename: string
-    :param config: contents of the config file
-    :type  config: :class:`dict`
-    """
-
-    @classmethod
-    def from_filename(cls, filename):
-        return cls(filename, load_yaml(filename))
-
-    @cached_property
-    def version(self):
-        if 'version' not in self.config:
-            return V1
-
-        version = self.config['version']
-
-        if isinstance(version, dict):
-            log.warn('Unexpected type for "version" key in "{}". Assuming '
-                     '"version" is the name of a service, and defaulting to '
-                     'Compose file version 1.'.format(self.filename))
-            return V1
-
-        if not isinstance(version, six.string_types):
-            raise ConfigurationError(
-                'Version in "{}" is invalid - it should be a string.'
-                .format(self.filename))
-
-        if version == '1':
-            raise ConfigurationError(
-                'Version in "{}" is invalid. {}'
-                .format(self.filename, VERSION_EXPLANATION))
-
-        if version == '2':
-            version = V2_0
-
-        if version != V2_0:
-            raise ConfigurationError(
-                'Version in "{}" is unsupported. {}'
-                .format(self.filename, VERSION_EXPLANATION))
-
-        return version
-
-    def get_service(self, name):
-        return self.get_service_dicts()[name]
-
-    def get_service_dicts(self):
-        return self.config if self.version == V1 else self.config.get('services', {})
-
-    def get_volumes(self):
-        return {} if self.version == V1 else self.config.get('volumes', {})
-
-    def get_networks(self):
-        return {} if self.version == V1 else self.config.get('networks', {})
-
-
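
The ConfigFile.version property above normalizes whatever users put in the top-level `version` key. The same rules, condensed into a dependency-free function (V1/V2_0 stand in for the compose.const values):

    V1, V2_0 = '1', '2.0'

    def resolve_version(config):
        if 'version' not in config:
            return V1                          # legacy v1 file: services at top level
        version = config['version']
        if isinstance(version, dict):
            return V1                          # "version" was actually a service name
        if not isinstance(version, str):
            raise ValueError('version must be a string')
        if version == '1':
            raise ValueError('v1 files must omit the version key entirely')
        if version == '2':
            version = V2_0
        if version != V2_0:
            raise ValueError('unsupported version %r' % version)
        return version

    assert resolve_version({'web': {'image': 'nginx'}}) == V1
    assert resolve_version({'version': '2', 'services': {}}) == V2_0
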
-class Config(namedtuple('_Config', 'version services volumes networks')):
-    """
-    :param version: configuration version
-    :type  version: int
-    :param services: List of service description dictionaries
-    :type  services: :class:`list`
-    :param volumes: Dictionary mapping volume names to description dictionaries
-    :type  volumes: :class:`dict`
-    :param networks: Dictionary mapping network names to description dictionaries
-    :type  networks: :class:`dict`
-    """
-
-
-class ServiceConfig(namedtuple('_ServiceConfig', 'working_dir filename name config')):
-
-    @classmethod
-    def with_abs_paths(cls, working_dir, filename, name, config):
-        if not working_dir:
-            raise ValueError("No working_dir for ServiceConfig.")
-
-        return cls(
-            os.path.abspath(working_dir),
-            os.path.abspath(filename) if filename else filename,
-            name,
-            config)
-
-
-def find(base_dir, filenames, environment):
-    if filenames == ['-']:
-        return ConfigDetails(
-            os.getcwd(),
-            [ConfigFile(None, yaml.safe_load(sys.stdin))],
-            environment
-        )
-
-    if filenames:
-        filenames = [os.path.join(base_dir, f) for f in filenames]
-    else:
-        filenames = get_default_config_files(base_dir)
-
-    log.debug("Using configuration files: {}".format(",".join(filenames)))
-    return ConfigDetails(
-        os.path.dirname(filenames[0]),
-        [ConfigFile.from_filename(f) for f in filenames],
-        environment
-    )
-
-
-def validate_config_version(config_files):
-    main_file = config_files[0]
-    validate_top_level_object(main_file)
-    for next_file in config_files[1:]:
-        validate_top_level_object(next_file)
-
-        if main_file.version != next_file.version:
-            raise ConfigurationError(
-                "Version mismatch: file {0} specifies version {1} but "
-                "extension file {2} uses version {3}".format(
-                    main_file.filename,
-                    main_file.version,
-                    next_file.filename,
-                    next_file.version))
-
-
-def get_default_config_files(base_dir):
-    (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)
-
-    if not candidates:
-        raise ComposeFileNotFound(SUPPORTED_FILENAMES)
-
-    winner = candidates[0]
-
-    if len(candidates) > 1:
-        log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
-        log.warn("Using %s\n", winner)
-
-    return [os.path.join(path, winner)] + get_default_override_file(path)
-
-
-def get_default_override_file(path):
-    override_filename = os.path.join(path, DEFAULT_OVERRIDE_FILENAME)
-    return [override_filename] if os.path.exists(override_filename) else []
-
-
-def find_candidates_in_parent_dirs(filenames, path):
-    """
-    Given a directory path to start, looks for filenames in the
-    directory, and then each parent directory successively,
-    until found.
-
-    Returns tuple (candidates, path).
-    """
-    candidates = [filename for filename in filenames
-                  if os.path.exists(os.path.join(path, filename))]
-
-    if not candidates:
-        parent_dir = os.path.join(path, '..')
-        if os.path.abspath(parent_dir) != os.path.abspath(path):
-            return find_candidates_in_parent_dirs(filenames, parent_dir)
-
-    return (candidates, path)
-
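
find_candidates_in_parent_dirs above is what lets docker-compose be run from any subdirectory of a project: it walks from the start directory toward the filesystem root until one of the supported filenames turns up. The same search, restated iteratively:

    import os

    def find_upwards(filenames, path):
        path = os.path.abspath(path)
        while True:
            candidates = [f for f in filenames
                          if os.path.exists(os.path.join(path, f))]
            parent = os.path.dirname(path)
            if candidates or parent == path:   # found something, or hit the root
                return candidates, path
            path = parent

    print(find_upwards(['docker-compose.yml', 'docker-compose.yaml'], '.'))
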
-
-def load(config_details):
-    """Load the configuration from a working directory and a list of
-    configuration files.  Files are loaded in order, and merged on top
-    of each other to create the final configuration.
-
-    Return a fully interpolated, extended and validated configuration.
-    """
-    validate_config_version(config_details.config_files)
-
-    processed_files = [
-        process_config_file(config_file, config_details.environment)
-        for config_file in config_details.config_files
-    ]
-    config_details = config_details._replace(config_files=processed_files)
-
-    main_file = config_details.config_files[0]
-    volumes = load_mapping(
-        config_details.config_files, 'get_volumes', 'Volume'
-    )
-    networks = load_mapping(
-        config_details.config_files, 'get_networks', 'Network'
-    )
-    service_dicts = load_services(config_details, main_file)
-
-    if main_file.version != V1:
-        for service_dict in service_dicts:
-            match_named_volumes(service_dict, volumes)
-
-    return Config(main_file.version, service_dicts, volumes, networks)
-
-
-def load_mapping(config_files, get_func, entity_type):
-    mapping = {}
-
-    for config_file in config_files:
-        for name, config in getattr(config_file, get_func)().items():
-            mapping[name] = config or {}
-            if not config:
-                continue
-
-            external = config.get('external')
-            if external:
-                if len(config.keys()) > 1:
-                    raise ConfigurationError(
-                        '{} {} declared as external but specifies'
-                        ' additional attributes ({}). '.format(
-                            entity_type,
-                            name,
-                            ', '.join([k for k in config.keys() if k != 'external'])
-                        )
-                    )
-                if isinstance(external, dict):
-                    config['external_name'] = external.get('name')
-                else:
-                    config['external_name'] = name
-
-            mapping[name] = config
-
-            if 'driver_opts' in config:
-                config['driver_opts'] = build_string_dict(
-                    config['driver_opts']
-                )
-
-    return mapping
-
-
-def load_services(config_details, config_file):
-    def build_service(service_name, service_dict, service_names):
-        service_config = ServiceConfig.with_abs_paths(
-            config_details.working_dir,
-            config_file.filename,
-            service_name,
-            service_dict)
-        resolver = ServiceExtendsResolver(
-            service_config, config_file, environment=config_details.environment
-        )
-        service_dict = process_service(resolver.run())
-
-        service_config = service_config._replace(config=service_dict)
-        validate_service(service_config, service_names, config_file.version)
-        service_dict = finalize_service(
-            service_config,
-            service_names,
-            config_file.version,
-            config_details.environment)
-        return service_dict
-
-    def build_services(service_config):
-        service_names = service_config.keys()
-        return sort_service_dicts([
-            build_service(name, service_dict, service_names)
-            for name, service_dict in service_config.items()
-        ])
-
-    def merge_services(base, override):
-        all_service_names = set(base) | set(override)
-        return {
-            name: merge_service_dicts_from_files(
-                base.get(name, {}),
-                override.get(name, {}),
-                config_file.version)
-            for name in all_service_names
-        }
-
-    service_configs = [
-        file.get_service_dicts() for file in config_details.config_files
-    ]
-
-    service_config = service_configs[0]
-    for next_config in service_configs[1:]:
-        service_config = merge_services(service_config, next_config)
-
-    return build_services(service_config)
-
-
-def interpolate_config_section(filename, config, section, environment):
-    validate_config_section(filename, config, section)
-    return interpolate_environment_variables(config, section, environment)
-
-
-def process_config_file(config_file, environment, service_name=None):
-    services = interpolate_config_section(
-        config_file.filename,
-        config_file.get_service_dicts(),
-        'service',
-        environment,)
-
-    if config_file.version == V2_0:
-        processed_config = dict(config_file.config)
-        processed_config['services'] = services
-        processed_config['volumes'] = interpolate_config_section(
-            config_file.filename,
-            config_file.get_volumes(),
-            'volume',
-            environment,)
-        processed_config['networks'] = interpolate_config_section(
-            config_file.filename,
-            config_file.get_networks(),
-            'network',
-            environment,)
-
-    if config_file.version == V1:
-        processed_config = services
-
-    config_file = config_file._replace(config=processed_config)
-    validate_against_config_schema(config_file)
-
-    if service_name and service_name not in services:
-        raise ConfigurationError(
-            "Cannot extend service '{}' in {}: Service not found".format(
-                service_name, config_file.filename))
-
-    return config_file
-
-
-class ServiceExtendsResolver(object):
-    def __init__(self, service_config, config_file, environment, already_seen=None):
-        self.service_config = service_config
-        self.working_dir = service_config.working_dir
-        self.already_seen = already_seen or []
-        self.config_file = config_file
-        self.environment = environment
-
-    @property
-    def signature(self):
-        return self.service_config.filename, self.service_config.name
-
-    def detect_cycle(self):
-        if self.signature in self.already_seen:
-            raise CircularReference(self.already_seen + [self.signature])
-
-    def run(self):
-        self.detect_cycle()
-
-        if 'extends' in self.service_config.config:
-            service_dict = self.resolve_extends(*self.validate_and_construct_extends())
-            return self.service_config._replace(config=service_dict)
-
-        return self.service_config
-
-    def validate_and_construct_extends(self):
-        extends = self.service_config.config['extends']
-        if not isinstance(extends, dict):
-            extends = {'service': extends}
-
-        config_path = self.get_extended_config_path(extends)
-        service_name = extends['service']
-
-        extends_file = ConfigFile.from_filename(config_path)
-        validate_config_version([self.config_file, extends_file])
-        extended_file = process_config_file(
-            extends_file, self.environment, service_name=service_name
-        )
-        service_config = extended_file.get_service(service_name)
-
-        return config_path, service_config, service_name
-
-    def resolve_extends(self, extended_config_path, service_dict, service_name):
-        resolver = ServiceExtendsResolver(
-            ServiceConfig.with_abs_paths(
-                os.path.dirname(extended_config_path),
-                extended_config_path,
-                service_name,
-                service_dict),
-            self.config_file,
-            already_seen=self.already_seen + [self.signature],
-            environment=self.environment
-        )
-
-        service_config = resolver.run()
-        other_service_dict = process_service(service_config)
-        validate_extended_service_dict(
-            other_service_dict,
-            extended_config_path,
-            service_name)
-
-        return merge_service_dicts(
-            other_service_dict,
-            self.service_config.config,
-            self.config_file.version)
-
-    def get_extended_config_path(self, extends_options):
-        """Service we are extending either has a value for 'file' set, which we
-        need to obtain a full path too or we are extending from a service
-        defined in our own file.
-        """
-        filename = self.service_config.filename
-        validate_extends_file_path(
-            self.service_config.name,
-            extends_options,
-            filename)
-        if 'file' in extends_options:
-            return expand_path(self.working_dir, extends_options['file'])
-        return filename
-
-
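# Illustrative sketch of the two accepted `extends` forms (YAML, names
# hypothetical):
#
#   web:
#     extends: base            # shorthand, normalized to {'service': 'base'}
#
#   web:
#     extends:
#       file: common.yml       # resolved relative to the working dir
#       service: base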
-def resolve_environment(service_dict, environment=None):
-    """Unpack any environment variables from an env_file, if set.
-    Interpolate environment values if set.
-    """
-    env = {}
-    for env_file in service_dict.get('env_file', []):
-        env.update(env_vars_from_file(env_file))
-
-    env.update(parse_environment(service_dict.get('environment')))
-    return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(env))
-
-
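# Illustrative sketch (hypothetical values; assumes the definitions above
# are in scope). A None value falls back to the provided environment:
example_service = {'environment': {'DEBUG': '1', 'HOME': None}}
resolve_environment(example_service, environment={'HOME': '/home/user'})
# -> {'DEBUG': '1', 'HOME': '/home/user'}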
-def resolve_build_args(build, environment):
-    args = parse_build_arguments(build.get('args'))
-    return dict(resolve_env_var(k, v, environment) for k, v in six.iteritems(args))
-
-
-def validate_extended_service_dict(service_dict, filename, service):
-    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)
-
-    if 'links' in service_dict:
-        raise ConfigurationError(
-            "%s services with 'links' cannot be extended" % error_prefix)
-
-    if 'volumes_from' in service_dict:
-        raise ConfigurationError(
-            "%s services with 'volumes_from' cannot be extended" % error_prefix)
-
-    if 'net' in service_dict:
-        if get_container_name_from_network_mode(service_dict['net']):
-            raise ConfigurationError(
-                "%s services with 'net: container' cannot be extended" % error_prefix)
-
-    if 'network_mode' in service_dict:
-        if get_service_name_from_network_mode(service_dict['network_mode']):
-            raise ConfigurationError(
-                "%s services with 'network_mode: service' cannot be extended" % error_prefix)
-
-    if 'depends_on' in service_dict:
-        raise ConfigurationError(
-            "%s services with 'depends_on' cannot be extended" % error_prefix)
-
-
-def validate_service(service_config, service_names, version):
-    service_dict, service_name = service_config.config, service_config.name
-    validate_service_constraints(service_dict, service_name, version)
-    validate_paths(service_dict)
-
-    validate_ulimits(service_config)
-    validate_network_mode(service_config, service_names)
-    validate_depends_on(service_config, service_names)
-    validate_links(service_config, service_names)
-
-    if not service_dict.get('image') and has_uppercase(service_name):
-        raise ConfigurationError(
-            "Service '{name}' contains uppercase characters which are not valid "
-            "as part of an image name. Either use a lowercase service name or "
-            "use the `image` field to set a custom name for the service image."
-            .format(name=service_name))
-
-
-def process_service(service_config):
-    working_dir = service_config.working_dir
-    service_dict = dict(service_config.config)
-
-    if 'env_file' in service_dict:
-        service_dict['env_file'] = [
-            expand_path(working_dir, path)
-            for path in to_list(service_dict['env_file'])
-        ]
-
-    if 'build' in service_dict:
-        if isinstance(service_dict['build'], six.string_types):
-            service_dict['build'] = resolve_build_path(working_dir, service_dict['build'])
-        elif isinstance(service_dict['build'], dict) and 'context' in service_dict['build']:
-            path = service_dict['build']['context']
-            service_dict['build']['context'] = resolve_build_path(working_dir, path)
-
-    if 'volumes' in service_dict and service_dict.get('volume_driver') is None:
-        service_dict['volumes'] = resolve_volume_paths(working_dir, service_dict)
-
-    if 'labels' in service_dict:
-        service_dict['labels'] = parse_labels(service_dict['labels'])
-
-    if 'extra_hosts' in service_dict:
-        service_dict['extra_hosts'] = parse_extra_hosts(service_dict['extra_hosts'])
-
-    for field in ['dns', 'dns_search', 'tmpfs']:
-        if field in service_dict:
-            service_dict[field] = to_list(service_dict[field])
-
-    return service_dict
-
-
-def finalize_service(service_config, service_names, version, environment):
-    service_dict = dict(service_config.config)
-
-    if 'environment' in service_dict or 'env_file' in service_dict:
-        service_dict['environment'] = resolve_environment(service_dict, environment)
-        service_dict.pop('env_file', None)
-
-    if 'volumes_from' in service_dict:
-        service_dict['volumes_from'] = [
-            VolumeFromSpec.parse(vf, service_names, version)
-            for vf in service_dict['volumes_from']
-        ]
-
-    if 'volumes' in service_dict:
-        service_dict['volumes'] = [
-            VolumeSpec.parse(v) for v in service_dict['volumes']]
-
-    if 'net' in service_dict:
-        network_mode = service_dict.pop('net')
-        container_name = get_container_name_from_network_mode(network_mode)
-        if container_name and container_name in service_names:
-            service_dict['network_mode'] = 'service:{}'.format(container_name)
-        else:
-            service_dict['network_mode'] = network_mode
-
-    if 'networks' in service_dict:
-        service_dict['networks'] = parse_networks(service_dict['networks'])
-
-    if 'restart' in service_dict:
-        service_dict['restart'] = parse_restart_spec(service_dict['restart'])
-
-    normalize_build(service_dict, service_config.working_dir, environment)
-
-    service_dict['name'] = service_config.name
-    return normalize_v1_service_format(service_dict)
-
-
-def normalize_v1_service_format(service_dict):
-    if 'log_driver' in service_dict or 'log_opt' in service_dict:
-        if 'logging' not in service_dict:
-            service_dict['logging'] = {}
-        if 'log_driver' in service_dict:
-            service_dict['logging']['driver'] = service_dict['log_driver']
-            del service_dict['log_driver']
-        if 'log_opt' in service_dict:
-            service_dict['logging']['options'] = service_dict['log_opt']
-            del service_dict['log_opt']
-
-    if 'dockerfile' in service_dict:
-        service_dict['build'] = service_dict.get('build', {})
-        service_dict['build'].update({
-            'dockerfile': service_dict.pop('dockerfile')
-        })
-
-    return service_dict
-
-
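# Illustrative sketch (hypothetical values; assumes the definition above
# is in scope):
normalize_v1_service_format({
    'log_driver': 'syslog',
    'log_opt': {'tag': 'web'},
    'dockerfile': 'Dockerfile.dev',
})
# -> {'logging': {'driver': 'syslog', 'options': {'tag': 'web'}},
#     'build': {'dockerfile': 'Dockerfile.dev'}}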
-def merge_service_dicts_from_files(base, override, version):
-    """When merging services from multiple files we need to merge the `extends`
-    field. This is not handled by `merge_service_dicts()` which is used to
-    perform the `extends`.
-    """
-    new_service = merge_service_dicts(base, override, version)
-    if 'extends' in override:
-        new_service['extends'] = override['extends']
-    elif 'extends' in base:
-        new_service['extends'] = base['extends']
-    return new_service
-
-
-class MergeDict(dict):
-    """A dict-like object responsible for merging two dicts into one."""
-
-    def __init__(self, base, override):
-        self.base = base
-        self.override = override
-
-    def needs_merge(self, field):
-        return field in self.base or field in self.override
-
-    def merge_field(self, field, merge_func, default=None):
-        if not self.needs_merge(field):
-            return
-
-        self[field] = merge_func(
-            self.base.get(field, default),
-            self.override.get(field, default))
-
-    def merge_mapping(self, field, parse_func):
-        if not self.needs_merge(field):
-            return
-
-        self[field] = parse_func(self.base.get(field))
-        self[field].update(parse_func(self.override.get(field)))
-
-    def merge_sequence(self, field, parse_func):
-        def parse_sequence_func(seq):
-            return to_mapping((parse_func(item) for item in seq), 'merge_field')
-
-        if not self.needs_merge(field):
-            return
-
-        merged = parse_sequence_func(self.base.get(field, []))
-        merged.update(parse_sequence_func(self.override.get(field, [])))
-        self[field] = [item.repr() for item in sorted(merged.values())]
-
-    def merge_scalar(self, field):
-        if self.needs_merge(field):
-            self[field] = self.override.get(field, self.base.get(field))
-
-
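# Illustrative sketch (hypothetical values): scalars prefer the override,
# mappings are unioned with override keys winning.
md = MergeDict({'image': 'app:1', 'labels': {'a': '1'}},
               {'labels': {'b': '2'}})
md.merge_scalar('image')
md.merge_mapping('labels', parse_labels)
dict(md)  # -> {'image': 'app:1', 'labels': {'a': '1', 'b': '2'}}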
-def merge_service_dicts(base, override, version):
-    md = MergeDict(base, override)
-
-    md.merge_mapping('environment', parse_environment)
-    md.merge_mapping('labels', parse_labels)
-    md.merge_mapping('ulimits', parse_ulimits)
-    md.merge_mapping('networks', parse_networks)
-    md.merge_sequence('links', ServiceLink.parse)
-
-    for field in ['volumes', 'devices']:
-        md.merge_field(field, merge_path_mappings)
-
-    for field in [
-        'ports', 'cap_add', 'cap_drop', 'expose', 'external_links',
-        'security_opt', 'volumes_from', 'depends_on',
-    ]:
-        md.merge_field(field, merge_unique_items_lists, default=[])
-
-    for field in ['dns', 'dns_search', 'env_file', 'tmpfs']:
-        md.merge_field(field, merge_list_or_string)
-
-    for field in set(ALLOWED_KEYS) - set(md):
-        md.merge_scalar(field)
-
-    if version == V1:
-        legacy_v1_merge_image_or_build(md, base, override)
-    elif md.needs_merge('build'):
-        md['build'] = merge_build(md, base, override)
-
-    return dict(md)
-
-
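# Illustrative sketch (hypothetical values; V2_0 as defined elsewhere in
# this package):
base = {'image': 'app:1', 'ports': ['8000'], 'environment': {'A': '1'}}
override = {'ports': ['9000'], 'environment': {'A': '2'}}
merge_service_dicts(base, override, V2_0)
# -> {'image': 'app:1', 'ports': ['8000', '9000'], 'environment': {'A': '2'}}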
-def merge_unique_items_lists(base, override):
-    return sorted(set().union(base, override))
-
-
-def merge_build(output, base, override):
-    def to_dict(service):
-        build_config = service.get('build', {})
-        if isinstance(build_config, six.string_types):
-            return {'context': build_config}
-        return build_config
-
-    md = MergeDict(to_dict(base), to_dict(override))
-    md.merge_scalar('context')
-    md.merge_scalar('dockerfile')
-    md.merge_mapping('args', parse_build_arguments)
-    return dict(md)
-
-
-def legacy_v1_merge_image_or_build(output, base, override):
-    output.pop('image', None)
-    output.pop('build', None)
-    if 'image' in override:
-        output['image'] = override['image']
-    elif 'build' in override:
-        output['build'] = override['build']
-    elif 'image' in base:
-        output['image'] = base['image']
-    elif 'build' in base:
-        output['build'] = base['build']
-
-
-def merge_environment(base, override):
-    env = parse_environment(base)
-    env.update(parse_environment(override))
-    return env
-
-
-def split_label(label):
-    if '=' in label:
-        return label.split('=', 1)
-    else:
-        return label, ''
-
-
-def parse_dict_or_list(split_func, type_name, arguments):
-    if not arguments:
-        return {}
-
-    if isinstance(arguments, list):
-        return dict(split_func(e) for e in arguments)
-
-    if isinstance(arguments, dict):
-        return dict(arguments)
-
-    raise ConfigurationError(
-        "%s \"%s\" must be a list or mapping," %
-        (type_name, arguments)
-    )
-
-
-parse_build_arguments = functools.partial(parse_dict_or_list, split_env, 'build arguments')
-parse_environment = functools.partial(parse_dict_or_list, split_env, 'environment')
-parse_labels = functools.partial(parse_dict_or_list, split_label, 'labels')
-parse_networks = functools.partial(parse_dict_or_list, lambda k: (k, None), 'networks')
-
-
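# Illustrative results for the partials above (hypothetical values):
parse_environment(['A=1', 'B'])    # -> {'A': '1', 'B': None}
parse_labels({'tier': 'backend'})  # -> {'tier': 'backend'}
parse_networks(['front', 'back'])  # -> {'front': None, 'back': None}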
-def parse_ulimits(ulimits):
-    if not ulimits:
-        return {}
-
-    if isinstance(ulimits, dict):
-        return dict(ulimits)
-
-
-def resolve_env_var(key, val, environment):
-    if val is not None:
-        return key, val
-    elif environment and key in environment:
-        return key, environment[key]
-    else:
-        return key, None
-
-
-def resolve_volume_paths(working_dir, service_dict):
-    return [
-        resolve_volume_path(working_dir, volume)
-        for volume in service_dict['volumes']
-    ]
-
-
-def resolve_volume_path(working_dir, volume):
-    container_path, host_path = split_path_mapping(volume)
-
-    if host_path is not None:
-        if host_path.startswith('.'):
-            host_path = expand_path(working_dir, host_path)
-        host_path = os.path.expanduser(host_path)
-        return u"{}:{}".format(host_path, container_path)
-    else:
-        return container_path
-
-
-def normalize_build(service_dict, working_dir, environment):
-
-    if 'build' in service_dict:
-        build = {}
-        # Shortcut where specifying a string is treated as the build context
-        if isinstance(service_dict['build'], six.string_types):
-            build['context'] = service_dict.pop('build')
-        else:
-            build.update(service_dict['build'])
-            if 'args' in build:
-                build['args'] = build_string_dict(
-                    resolve_build_args(build, environment)
-                )
-
-        service_dict['build'] = build
-
-
-def resolve_build_path(working_dir, build_path):
-    if is_url(build_path):
-        return build_path
-    return expand_path(working_dir, build_path)
-
-
-def is_url(build_path):
-    return build_path.startswith(DOCKER_VALID_URL_PREFIXES)
-
-
-def validate_paths(service_dict):
-    if 'build' in service_dict:
-        build = service_dict.get('build', {})
-
-        if isinstance(build, six.string_types):
-            build_path = build
-        elif isinstance(build, dict) and 'context' in build:
-            build_path = build['context']
-        else:
-            # We have a build section but no context, so nothing to validate
-            return
-
-        if (
-            not is_url(build_path) and
-            (not os.path.exists(build_path) or not os.access(build_path, os.R_OK))
-        ):
-            raise ConfigurationError(
-                "build path %s either does not exist, is not accessible, "
-                "or is not a valid URL." % build_path)
-
-
-def merge_path_mappings(base, override):
-    d = dict_from_path_mappings(base)
-    d.update(dict_from_path_mappings(override))
-    return path_mappings_from_dict(d)
-
-
-def dict_from_path_mappings(path_mappings):
-    if path_mappings:
-        return dict(split_path_mapping(v) for v in path_mappings)
-    else:
-        return {}
-
-
-def path_mappings_from_dict(d):
-    return [join_path_mapping(v) for v in sorted(d.items())]
-
-
-def split_path_mapping(volume_path):
-    """
-    Ascertain if the volume_path contains a host path as well as a container
-    path. Using splitdrive so windows absolute paths won't cause issues with
-    splitting on ':'.
-    """
-    # splitdrive is very naive, so handle special cases where we can be sure
-    # the first character is not a drive.
-    if (volume_path.startswith('.') or volume_path.startswith('~') or
-            volume_path.startswith('/')):
-        drive, volume_config = '', volume_path
-    else:
-        drive, volume_config = ntpath.splitdrive(volume_path)
-
-    if ':' in volume_config:
-        (host, container) = volume_config.split(':', 1)
-        return (container, drive + host)
-    else:
-        return (volume_path, None)
-
-
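# Illustrative inputs and outputs (note the (container, host) ordering):
split_path_mapping('/host/data:/srv/data')  # -> ('/srv/data', '/host/data')
split_path_mapping('named_vol:/srv/data')   # -> ('/srv/data', 'named_vol')
split_path_mapping('/srv/cache')            # -> ('/srv/cache', None)
split_path_mapping('C:\\data:/srv/data')    # -> ('/srv/data', 'C:\\data')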
-def join_path_mapping(pair):
-    (container, host) = pair
-    if host is None:
-        return container
-    else:
-        return ":".join((host, container))
-
-
-def expand_path(working_dir, path):
-    return os.path.abspath(os.path.join(working_dir, os.path.expanduser(path)))
-
-
-def merge_list_or_string(base, override):
-    return to_list(base) + to_list(override)
-
-
-def to_list(value):
-    if value is None:
-        return []
-    elif isinstance(value, six.string_types):
-        return [value]
-    else:
-        return value
-
-
-def to_mapping(sequence, key_field):
-    return {getattr(item, key_field): item for item in sequence}
-
-
-def has_uppercase(name):
-    return any(char in string.ascii_uppercase for char in name)
-
-
-def load_yaml(filename):
-    try:
-        with open(filename, 'r') as fh:
-            return yaml.safe_load(fh)
-    except (IOError, yaml.YAMLError) as e:
-        error_name = getattr(e, '__module__', '') + '.' + e.__class__.__name__
-        raise ConfigurationError(u"{}: {}".format(error_name, e))
diff --git a/env2/lib/python2.7/site-packages/compose/config/config_schema_v1.json b/env2/lib/python2.7/site-packages/compose/config/config_schema_v1.json
deleted file mode 100644
index 36a9379..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/config_schema_v1.json
+++ /dev/null
@@ -1,187 +0,0 @@
-{
-  "$schema": "http://json-schema.org/draft-04/schema#",
-  "id": "config_schema_v1.json",
-
-  "type": "object",
-
-  "patternProperties": {
-    "^[a-zA-Z0-9._-]+$": {
-      "$ref": "#/definitions/service"
-    }
-  },
-
-  "additionalProperties": false,
-
-  "definitions": {
-    "service": {
-      "id": "#/definitions/service",
-      "type": "object",
-
-      "properties": {
-        "build": {"type": "string"},
-        "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "cgroup_parent": {"type": "string"},
-        "command": {
-          "oneOf": [
-            {"type": "string"},
-            {"type": "array", "items": {"type": "string"}}
-          ]
-        },
-        "container_name": {"type": "string"},
-        "cpu_shares": {"type": ["number", "string"]},
-        "cpu_quota": {"type": ["number", "string"]},
-        "cpuset": {"type": "string"},
-        "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "dns": {"$ref": "#/definitions/string_or_list"},
-        "dns_search": {"$ref": "#/definitions/string_or_list"},
-        "dockerfile": {"type": "string"},
-        "domainname": {"type": "string"},
-        "entrypoint": {
-          "oneOf": [
-            {"type": "string"},
-            {"type": "array", "items": {"type": "string"}}
-          ]
-        },
-        "env_file": {"$ref": "#/definitions/string_or_list"},
-        "environment": {"$ref": "#/definitions/list_or_dict"},
-
-        "expose": {
-          "type": "array",
-          "items": {
-            "type": ["string", "number"],
-            "format": "expose"
-          },
-          "uniqueItems": true
-        },
-
-        "extends": {
-          "oneOf": [
-            {
-              "type": "string"
-            },
-            {
-              "type": "object",
-
-              "properties": {
-                "service": {"type": "string"},
-                "file": {"type": "string"}
-              },
-              "required": ["service"],
-              "additionalProperties": false
-            }
-          ]
-        },
-
-        "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
-        "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "hostname": {"type": "string"},
-        "image": {"type": "string"},
-        "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
-        "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "log_driver": {"type": "string"},
-        "log_opt": {"type": "object"},
-        "mac_address": {"type": "string"},
-        "mem_limit": {"type": ["number", "string"]},
-        "memswap_limit": {"type": ["number", "string"]},
-        "net": {"type": "string"},
-        "pid": {"type": ["string", "null"]},
-
-        "ports": {
-          "type": "array",
-          "items": {
-            "type": ["string", "number"],
-            "format": "ports"
-          },
-          "uniqueItems": true
-        },
-
-        "privileged": {"type": "boolean"},
-        "read_only": {"type": "boolean"},
-        "restart": {"type": "string"},
-        "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "shm_size": {"type": ["number", "string"]},
-        "stdin_open": {"type": "boolean"},
-        "stop_signal": {"type": "string"},
-        "tty": {"type": "boolean"},
-        "ulimits": {
-          "type": "object",
-          "patternProperties": {
-            "^[a-z]+$": {
-              "oneOf": [
-                {"type": "integer"},
-                {
-                  "type":"object",
-                  "properties": {
-                    "hard": {"type": "integer"},
-                    "soft": {"type": "integer"}
-                  },
-                  "required": ["soft", "hard"],
-                  "additionalProperties": false
-                }
-              ]
-            }
-          }
-        },
-        "user": {"type": "string"},
-        "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "volume_driver": {"type": "string"},
-        "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "working_dir": {"type": "string"}
-      },
-
-      "dependencies": {
-        "memswap_limit": ["mem_limit"]
-      },
-      "additionalProperties": false
-    },
-
-    "string_or_list": {
-      "oneOf": [
-        {"type": "string"},
-        {"$ref": "#/definitions/list_of_strings"}
-      ]
-    },
-
-    "list_of_strings": {
-      "type": "array",
-      "items": {"type": "string"},
-      "uniqueItems": true
-    },
-
-    "list_or_dict": {
-      "oneOf": [
-        {
-          "type": "object",
-          "patternProperties": {
-            ".+": {
-              "type": ["string", "number", "null"]
-            }
-          },
-          "additionalProperties": false
-        },
-        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
-      ]
-    },
-
-    "constraints": {
-      "service": {
-        "id": "#/definitions/constraints/service",
-        "anyOf": [
-          {
-            "required": ["build"],
-            "not": {"required": ["image"]}
-          },
-          {
-            "required": ["image"],
-            "not": {"anyOf": [
-              {"required": ["build"]},
-              {"required": ["dockerfile"]}
-            ]}
-          }
-        ]
-      }
-    }
-  }
-}
diff --git a/env2/lib/python2.7/site-packages/compose/config/config_schema_v2.0.json b/env2/lib/python2.7/site-packages/compose/config/config_schema_v2.0.json
deleted file mode 100644
index e84d131..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/config_schema_v2.0.json
+++ /dev/null
@@ -1,318 +0,0 @@
-{
-  "$schema": "http://json-schema.org/draft-04/schema#",
-  "id": "config_schema_v2.0.json",
-  "type": "object",
-
-  "properties": {
-    "version": {
-      "type": "string"
-    },
-
-    "services": {
-      "id": "#/properties/services",
-      "type": "object",
-      "patternProperties": {
-        "^[a-zA-Z0-9._-]+$": {
-          "$ref": "#/definitions/service"
-        }
-      },
-      "additionalProperties": false
-    },
-
-    "networks": {
-      "id": "#/properties/networks",
-      "type": "object",
-      "patternProperties": {
-        "^[a-zA-Z0-9._-]+$": {
-          "$ref": "#/definitions/network"
-        }
-      }
-    },
-
-    "volumes": {
-      "id": "#/properties/volumes",
-      "type": "object",
-      "patternProperties": {
-        "^[a-zA-Z0-9._-]+$": {
-          "$ref": "#/definitions/volume"
-        }
-      },
-      "additionalProperties": false
-    }
-  },
-
-  "additionalProperties": false,
-
-  "definitions": {
-
-    "service": {
-      "id": "#/definitions/service",
-      "type": "object",
-
-      "properties": {
-        "build": {
-          "oneOf": [
-            {"type": "string"},
-            {
-              "type": "object",
-              "properties": {
-                "context": {"type": "string"},
-                "dockerfile": {"type": "string"},
-                "args": {"$ref": "#/definitions/list_or_dict"}
-              },
-              "additionalProperties": false
-            }
-          ]
-        },
-        "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "cgroup_parent": {"type": "string"},
-        "command": {
-          "oneOf": [
-            {"type": "string"},
-            {"type": "array", "items": {"type": "string"}}
-          ]
-        },
-        "container_name": {"type": "string"},
-        "cpu_shares": {"type": ["number", "string"]},
-        "cpu_quota": {"type": ["number", "string"]},
-        "cpuset": {"type": "string"},
-        "depends_on": {"$ref": "#/definitions/list_of_strings"},
-        "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "dns": {"$ref": "#/definitions/string_or_list"},
-        "dns_search": {"$ref": "#/definitions/string_or_list"},
-        "domainname": {"type": "string"},
-        "entrypoint": {
-          "oneOf": [
-            {"type": "string"},
-            {"type": "array", "items": {"type": "string"}}
-          ]
-        },
-        "env_file": {"$ref": "#/definitions/string_or_list"},
-        "environment": {"$ref": "#/definitions/list_or_dict"},
-
-        "expose": {
-          "type": "array",
-          "items": {
-            "type": ["string", "number"],
-            "format": "expose"
-          },
-          "uniqueItems": true
-        },
-
-        "extends": {
-          "oneOf": [
-            {
-              "type": "string"
-            },
-            {
-              "type": "object",
-
-              "properties": {
-                "service": {"type": "string"},
-                "file": {"type": "string"}
-              },
-              "required": ["service"],
-              "additionalProperties": false
-            }
-          ]
-        },
-
-        "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
-        "hostname": {"type": "string"},
-        "image": {"type": "string"},
-        "ipc": {"type": "string"},
-        "labels": {"$ref": "#/definitions/list_or_dict"},
-        "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-
-        "logging": {
-            "type": "object",
-
-            "properties": {
-                "driver": {"type": "string"},
-                "options": {"type": "object"}
-            },
-            "additionalProperties": false
-        },
-
-        "mac_address": {"type": "string"},
-        "mem_limit": {"type": ["number", "string"]},
-        "memswap_limit": {"type": ["number", "string"]},
-        "network_mode": {"type": "string"},
-
-        "networks": {
-          "oneOf": [
-            {"$ref": "#/definitions/list_of_strings"},
-            {
-              "type": "object",
-              "patternProperties": {
-                "^[a-zA-Z0-9._-]+$": {
-                  "oneOf": [
-                    {
-                      "type": "object",
-                      "properties": {
-                        "aliases": {"$ref": "#/definitions/list_of_strings"},
-                        "ipv4_address": {"type": "string"},
-                        "ipv6_address": {"type": "string"}
-                      },
-                      "additionalProperties": false
-                    },
-                    {"type": "null"}
-                  ]
-                }
-              },
-              "additionalProperties": false
-            }
-          ]
-        },
-        "pid": {"type": ["string", "null"]},
-
-        "ports": {
-          "type": "array",
-          "items": {
-            "type": ["string", "number"],
-            "format": "ports"
-          },
-          "uniqueItems": true
-        },
-
-        "privileged": {"type": "boolean"},
-        "read_only": {"type": "boolean"},
-        "restart": {"type": "string"},
-        "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "shm_size": {"type": ["number", "string"]},
-        "stdin_open": {"type": "boolean"},
-        "stop_signal": {"type": "string"},
-        "tmpfs": {"$ref": "#/definitions/string_or_list"},
-        "tty": {"type": "boolean"},
-        "ulimits": {
-          "type": "object",
-          "patternProperties": {
-            "^[a-z]+$": {
-              "oneOf": [
-                {"type": "integer"},
-                {
-                  "type":"object",
-                  "properties": {
-                    "hard": {"type": "integer"},
-                    "soft": {"type": "integer"}
-                  },
-                  "required": ["soft", "hard"],
-                  "additionalProperties": false
-                }
-              ]
-            }
-          }
-        },
-        "user": {"type": "string"},
-        "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "volume_driver": {"type": "string"},
-        "volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
-        "working_dir": {"type": "string"}
-      },
-
-      "dependencies": {
-        "memswap_limit": ["mem_limit"]
-      },
-      "additionalProperties": false
-    },
-
-    "network": {
-      "id": "#/definitions/network",
-      "type": "object",
-      "properties": {
-        "driver": {"type": "string"},
-        "driver_opts": {
-          "type": "object",
-          "patternProperties": {
-            "^.+$": {"type": ["string", "number"]}
-          }
-        },
-        "ipam": {
-            "type": "object",
-            "properties": {
-                "driver": {"type": "string"},
-                "config": {
-                    "type": "array"
-                }
-            },
-            "additionalProperties": false
-        },
-        "external": {
-          "type": ["boolean", "object"],
-          "properties": {
-            "name": {"type": "string"}
-          },
-          "additionalProperties": false
-        }
-      },
-      "additionalProperties": false
-    },
-
-    "volume": {
-      "id": "#/definitions/volume",
-      "type": ["object", "null"],
-      "properties": {
-        "driver": {"type": "string"},
-        "driver_opts": {
-          "type": "object",
-          "patternProperties": {
-            "^.+$": {"type": ["string", "number"]}
-          }
-        },
-        "external": {
-          "type": ["boolean", "object"],
-          "properties": {
-            "name": {"type": "string"}
-          }
-        }
-      },
-      "additionalProperties": false
-    },
-
-    "string_or_list": {
-      "oneOf": [
-        {"type": "string"},
-        {"$ref": "#/definitions/list_of_strings"}
-      ]
-    },
-
-    "list_of_strings": {
-      "type": "array",
-      "items": {"type": "string"},
-      "uniqueItems": true
-    },
-
-    "list_or_dict": {
-      "oneOf": [
-        {
-          "type": "object",
-          "patternProperties": {
-            ".+": {
-              "type": ["string", "number", "null"]
-            }
-          },
-          "additionalProperties": false
-        },
-        {"type": "array", "items": {"type": "string"}, "uniqueItems": true}
-      ]
-    },
-
-    "constraints": {
-      "service": {
-        "id": "#/definitions/constraints/service",
-        "anyOf": [
-          {"required": ["build"]},
-          {"required": ["image"]}
-        ],
-        "properties": {
-          "build": {
-            "required": ["context"]
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/env2/lib/python2.7/site-packages/compose/config/environment.py b/env2/lib/python2.7/site-packages/compose/config/environment.py
deleted file mode 100644
index 5d6b5af..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/environment.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import codecs
-import logging
-import os
-
-import six
-
-from ..const import IS_WINDOWS_PLATFORM
-from .errors import ConfigurationError
-
-log = logging.getLogger(__name__)
-
-
-def split_env(env):
-    if isinstance(env, six.binary_type):
-        env = env.decode('utf-8', 'replace')
-    if '=' in env:
-        return env.split('=', 1)
-    else:
-        return env, None
-
-
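# Illustrative behavior: splits on the first '=' only; a bare name maps
# to None so it can be resolved from the host environment later.
split_env('KEY=value')  # -> ('KEY', 'value')
split_env('KEY=a=b')    # -> ('KEY', 'a=b')
split_env('KEY')        # -> ('KEY', None)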
-def env_vars_from_file(filename):
-    """
-    Read in a line-delimited file of environment variables.
-    """
-    if not os.path.exists(filename):
-        raise ConfigurationError("Couldn't find env file: %s" % filename)
-    elif not os.path.isfile(filename):
-        raise ConfigurationError("%s is not a file." % (filename))
-    env = {}
-    for line in codecs.open(filename, 'r', 'utf-8'):
-        line = line.strip()
-        if line and not line.startswith('#'):
-            k, v = split_env(line)
-            env[k] = v
-    return env
-
-
-class Environment(dict):
-    def __init__(self, *args, **kwargs):
-        super(Environment, self).__init__(*args, **kwargs)
-        self.missing_keys = []
-
-    @classmethod
-    def from_env_file(cls, base_dir):
-        def _initialize():
-            result = cls()
-            if base_dir is None:
-                return result
-            env_file_path = os.path.join(base_dir, '.env')
-            try:
-                return cls(env_vars_from_file(env_file_path))
-            except ConfigurationError:
-                pass
-            return result
-        instance = _initialize()
-        instance.update(os.environ)
-        return instance
-
-    @classmethod
-    def from_command_line(cls, parsed_env_opts):
-        result = cls()
-        for k, v in parsed_env_opts.items():
-            # Values from the command line take priority, unless they're unset
-            # in which case they take the value from the system's environment
-            if v is None and k in os.environ:
-                result[k] = os.environ[k]
-            else:
-                result[k] = v
-        return result
-
-    def __getitem__(self, key):
-        try:
-            return super(Environment, self).__getitem__(key)
-        except KeyError:
-            if IS_WINDOWS_PLATFORM:
-                try:
-                    return super(Environment, self).__getitem__(key.upper())
-                except KeyError:
-                    pass
-            if key not in self.missing_keys:
-                log.warn(
-                    "The {} variable is not set. Defaulting to a blank string."
-                    .format(key)
-                )
-                self.missing_keys.append(key)
-
-            return ""
-
-    def __contains__(self, key):
-        result = super(Environment, self).__contains__(key)
-        if IS_WINDOWS_PLATFORM:
-            return (
-                result or super(Environment, self).__contains__(key.upper())
-            )
-        return result
-
-    def get(self, key, *args, **kwargs):
-        if IS_WINDOWS_PLATFORM:
-            return super(Environment, self).get(
-                key,
-                super(Environment, self).get(key.upper(), *args, **kwargs)
-            )
-        return super(Environment, self).get(key, *args, **kwargs)
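# Illustrative sketch (hypothetical directory): os.environ is applied
# last, so it overrides values read from <base_dir>/.env, and missing
# keys default to "" with a one-time warning.
env = Environment.from_env_file('/path/to/project')
env['UNSET_VARIABLE']  # -> "" (plus a logged warning), not a KeyError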
diff --git a/env2/lib/python2.7/site-packages/compose/config/errors.py b/env2/lib/python2.7/site-packages/compose/config/errors.py
deleted file mode 100644
index d14cbbd..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/errors.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-
-VERSION_EXPLANATION = (
-    'You might be seeing this error because you\'re using the wrong Compose '
-    'file version. Either specify a version of "2" (or "2.0") and place your '
-    'service definitions under the `services` key, or omit the `version` key '
-    'and place your service definitions at the root of the file to use '
-    'version 1.\nFor more on the Compose file format versions, see '
-    'https://docs.docker.com/compose/compose-file/')
-
-
-class ConfigurationError(Exception):
-    def __init__(self, msg):
-        self.msg = msg
-
-    def __str__(self):
-        return self.msg
-
-
-class DependencyError(ConfigurationError):
-    pass
-
-
-class CircularReference(ConfigurationError):
-    def __init__(self, trail):
-        self.trail = trail
-
-    @property
-    def msg(self):
-        lines = [
-            "{} in {}".format(service_name, filename)
-            for (filename, service_name) in self.trail
-        ]
-        return "Circular reference:\n  {}".format("\n  extends ".join(lines))
-
-
-class ComposeFileNotFound(ConfigurationError):
-    def __init__(self, supported_filenames):
-        super(ComposeFileNotFound, self).__init__("""
-        Can't find a suitable configuration file in this directory or any
-        parent. Are you in the right directory?
-
-        Supported filenames: %s
-        """ % ", ".join(supported_filenames))
diff --git a/env2/lib/python2.7/site-packages/compose/config/interpolation.py b/env2/lib/python2.7/site-packages/compose/config/interpolation.py
deleted file mode 100644
index 63020d9..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/interpolation.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-from string import Template
-
-import six
-
-from .errors import ConfigurationError
-log = logging.getLogger(__name__)
-
-
-def interpolate_environment_variables(config, section, environment):
-
-    def process_item(name, config_dict):
-        return dict(
-            (key, interpolate_value(name, key, val, section, environment))
-            for key, val in (config_dict or {}).items()
-        )
-
-    return dict(
-        (name, process_item(name, config_dict or {}))
-        for name, config_dict in config.items()
-    )
-
-
-def interpolate_value(name, config_key, value, section, mapping):
-    try:
-        return recursive_interpolate(value, mapping)
-    except InvalidInterpolation as e:
-        raise ConfigurationError(
-            'Invalid interpolation format for "{config_key}" option '
-            'in {section} "{name}": "{string}"'.format(
-                config_key=config_key,
-                name=name,
-                section=section,
-                string=e.string))
-
-
-def recursive_interpolate(obj, mapping):
-    if isinstance(obj, six.string_types):
-        return interpolate(obj, mapping)
-    elif isinstance(obj, dict):
-        return dict(
-            (key, recursive_interpolate(val, mapping))
-            for (key, val) in obj.items()
-        )
-    elif isinstance(obj, list):
-        return [recursive_interpolate(val, mapping) for val in obj]
-    else:
-        return obj
-
-
-def interpolate(string, mapping):
-    try:
-        return Template(string).substitute(mapping)
-    except ValueError:
-        raise InvalidInterpolation(string)
-
-
-class InvalidInterpolation(Exception):
-    def __init__(self, string):
-        self.string = string
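# Illustrative string.Template-style substitution (hypothetical values):
interpolate('${TAG}-build', {'TAG': 'v1'})                   # -> 'v1-build'
recursive_interpolate({'image': 'app:$TAG'}, {'TAG': 'v1'})  # -> {'image': 'app:v1'}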
diff --git a/env2/lib/python2.7/site-packages/compose/config/serialize.py b/env2/lib/python2.7/site-packages/compose/config/serialize.py
deleted file mode 100644
index b788a55..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/serialize.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import six
-import yaml
-
-from compose.config import types
-from compose.config.config import V1
-from compose.config.config import V2_0
-
-
-def serialize_config_type(dumper, data):
-    representer = dumper.represent_str if six.PY3 else dumper.represent_unicode
-    return representer(data.repr())
-
-
-yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
-yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
-
-
-def denormalize_config(config):
-    denormalized_services = [
-        denormalize_service_dict(service_dict, config.version)
-        for service_dict in config.services
-    ]
-    services = {
-        service_dict.pop('name'): service_dict
-        for service_dict in denormalized_services
-    }
-    networks = config.networks.copy()
-    for net_name, net_conf in networks.items():
-        if 'external_name' in net_conf:
-            del net_conf['external_name']
-
-    return {
-        'version': V2_0,
-        'services': services,
-        'networks': networks,
-        'volumes': config.volumes,
-    }
-
-
-def serialize_config(config):
-    return yaml.safe_dump(
-        denormalize_config(config),
-        default_flow_style=False,
-        indent=2,
-        width=80)
-
-
-def denormalize_service_dict(service_dict, version):
-    service_dict = service_dict.copy()
-
-    if 'restart' in service_dict:
-        service_dict['restart'] = types.serialize_restart_spec(service_dict['restart'])
-
-    if version == V1 and 'network_mode' not in service_dict:
-        service_dict['network_mode'] = 'bridge'
-
-    return service_dict
diff --git a/env2/lib/python2.7/site-packages/compose/config/sort_services.py b/env2/lib/python2.7/site-packages/compose/config/sort_services.py
deleted file mode 100644
index 20ac446..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/sort_services.py
+++ /dev/null
@@ -1,72 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from compose.config.errors import DependencyError
-
-
-def get_service_name_from_network_mode(network_mode):
-    return get_source_name_from_network_mode(network_mode, 'service')
-
-
-def get_container_name_from_network_mode(network_mode):
-    return get_source_name_from_network_mode(network_mode, 'container')
-
-
-def get_source_name_from_network_mode(network_mode, source_type):
-    if not network_mode:
-        return
-
-    if not network_mode.startswith(source_type+':'):
-        return
-
-    _, net_name = network_mode.split(':', 1)
-    return net_name
-
-
-def get_service_names(links):
-    return [link.split(':')[0] for link in links]
-
-
-def get_service_names_from_volumes_from(volumes_from):
-    return [volume_from.source for volume_from in volumes_from]
-
-
-def get_service_dependents(service_dict, services):
-    name = service_dict['name']
-    return [
-        service for service in services
-        if (name in get_service_names(service.get('links', [])) or
-            name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
-            name == get_service_name_from_network_mode(service.get('network_mode')) or
-            name in service.get('depends_on', []))
-    ]
-
-
-def sort_service_dicts(services):
-    # Topological sort (Cormen/Tarjan algorithm).
-    unmarked = services[:]
-    temporary_marked = set()
-    sorted_services = []
-
-    def visit(n):
-        if n['name'] in temporary_marked:
-            if n['name'] in get_service_names(n.get('links', [])):
-                raise DependencyError('A service cannot link to itself: %s' % n['name'])
-            if n['name'] in n.get('volumes_from', []):
-                raise DependencyError('A service cannot mount itself as a volume: %s' % n['name'])
-            if n['name'] in n.get('depends_on', []):
-                raise DependencyError('A service cannot depend on itself: %s' % n['name'])
-            raise DependencyError('Circular dependency between %s' % ' and '.join(temporary_marked))
-
-        if n in unmarked:
-            temporary_marked.add(n['name'])
-            for m in get_service_dependents(n, services):
-                visit(m)
-            temporary_marked.remove(n['name'])
-            unmarked.remove(n)
-            sorted_services.insert(0, n)
-
-    while unmarked:
-        visit(unmarked[-1])
-
-    return sorted_services
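# Illustrative sketch (hypothetical names): dependencies sort ahead of
# their dependents.
services = [{'name': 'web', 'depends_on': ['db']}, {'name': 'db'}]
[s['name'] for s in sort_service_dicts(services)]  # -> ['db', 'web']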
diff --git a/env2/lib/python2.7/site-packages/compose/config/types.py b/env2/lib/python2.7/site-packages/compose/config/types.py
deleted file mode 100644
index e6a3dea..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/types.py
+++ /dev/null
@@ -1,198 +0,0 @@
-"""
-Types for objects parsed from the configuration.
-"""
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import os
-from collections import namedtuple
-
-import six
-
-from compose.config.config import V1
-from compose.config.errors import ConfigurationError
-from compose.const import IS_WINDOWS_PLATFORM
-
-
-class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
-
-    # TODO: drop service_names arg when v1 is removed
-    @classmethod
-    def parse(cls, volume_from_config, service_names, version):
-        func = cls.parse_v1 if version == V1 else cls.parse_v2
-        return func(service_names, volume_from_config)
-
-    @classmethod
-    def parse_v1(cls, service_names, volume_from_config):
-        parts = volume_from_config.split(':')
-        if len(parts) > 2:
-            raise ConfigurationError(
-                "volume_from {} has incorrect format, should be "
-                "service[:mode]".format(volume_from_config))
-
-        if len(parts) == 1:
-            source = parts[0]
-            mode = 'rw'
-        else:
-            source, mode = parts
-
-        type = 'service' if source in service_names else 'container'
-        return cls(source, mode, type)
-
-    @classmethod
-    def parse_v2(cls, service_names, volume_from_config):
-        parts = volume_from_config.split(':')
-        if len(parts) > 3:
-            raise ConfigurationError(
-                "volume_from {} has incorrect format, should be one of "
-                "'<service name>[:<mode>]' or "
-                "'container:<container name>[:<mode>]'".format(volume_from_config))
-
-        if len(parts) == 1:
-            source = parts[0]
-            return cls(source, 'rw', 'service')
-
-        if len(parts) == 2:
-            if parts[0] == 'container':
-                type, source = parts
-                return cls(source, 'rw', type)
-
-            source, mode = parts
-            return cls(source, mode, 'service')
-
-        if len(parts) == 3:
-            type, source, mode = parts
-            if type not in ('service', 'container'):
-                raise ConfigurationError(
-                    "Unknown volumes_from type '{}' in '{}'".format(
-                        type,
-                        volume_from_config))
-
-        return cls(source, mode, type)
-
-    def repr(self):
-        return '{v.type}:{v.source}:{v.mode}'.format(v=self)
-
-
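# Illustrative v2 parses (hypothetical values; `version` is any non-V1
# version constant):
VolumeFromSpec.parse('web', ['web'], version)
# -> VolumeFromSpec(source='web', mode='rw', type='service')
VolumeFromSpec.parse('container:data:ro', ['web'], version)
# -> VolumeFromSpec(source='data', mode='ro', type='container')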
-def parse_restart_spec(restart_config):
-    if not restart_config:
-        return None
-    parts = restart_config.split(':')
-    if len(parts) > 2:
-        raise ConfigurationError(
-            "Restart %s has incorrect format, should be "
-            "mode[:max_retry]" % restart_config)
-    if len(parts) == 2:
-        name, max_retry_count = parts
-    else:
-        name, = parts
-        max_retry_count = 0
-
-    return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
-
-
-def serialize_restart_spec(restart_spec):
-    parts = [restart_spec['Name']]
-    if restart_spec['MaximumRetryCount']:
-        parts.append(six.text_type(restart_spec['MaximumRetryCount']))
-    return ':'.join(parts)
-
-
-def parse_extra_hosts(extra_hosts_config):
-    if not extra_hosts_config:
-        return {}
-
-    if isinstance(extra_hosts_config, dict):
-        return dict(extra_hosts_config)
-
-    if isinstance(extra_hosts_config, list):
-        extra_hosts_dict = {}
-        for extra_hosts_line in extra_hosts_config:
-            # TODO: validate string contains ':' ?
-            host, ip = extra_hosts_line.split(':', 1)
-            extra_hosts_dict[host.strip()] = ip.strip()
-        return extra_hosts_dict
-
-
-def normalize_paths_for_engine(external_path, internal_path):
-    """Windows paths, c:\my\path\shiny, need to be changed to be compatible with
-    the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
-    """
-    if not IS_WINDOWS_PLATFORM:
-        return external_path, internal_path
-
-    if external_path:
-        drive, tail = os.path.splitdrive(external_path)
-
-        if drive:
-            external_path = '/' + drive.lower().rstrip(':') + tail
-
-        external_path = external_path.replace('\\', '/')
-
-    return external_path, internal_path.replace('\\', '/')
-
-
-class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
-
-    @classmethod
-    def parse(cls, volume_config):
-        """Parse a volume_config path and split it into external:internal[:mode]
-        parts to be returned as a valid VolumeSpec.
-        """
-        if IS_WINDOWS_PLATFORM:
-            # Relative paths on Windows expand to include the drive, e.g. C:\,
-            # so we join the first two parts back together to count as one.
-            drive, tail = os.path.splitdrive(volume_config)
-            parts = tail.split(":")
-
-            if drive:
-                parts[0] = drive + parts[0]
-        else:
-            parts = volume_config.split(':')
-
-        if len(parts) > 3:
-            raise ConfigurationError(
-                "Volume %s has incorrect format, should be "
-                "external:internal[:mode]" % volume_config)
-
-        if len(parts) == 1:
-            external, internal = normalize_paths_for_engine(
-                None,
-                os.path.normpath(parts[0]))
-        else:
-            external, internal = normalize_paths_for_engine(
-                os.path.normpath(parts[0]),
-                os.path.normpath(parts[1]))
-
-        mode = 'rw'
-        if len(parts) == 3:
-            mode = parts[2]
-
-        return cls(external, internal, mode)
-
-    def repr(self):
-        external = self.external + ':' if self.external else ''
-        return '{ext}{v.internal}:{v.mode}'.format(ext=external, v=self)
-
-    @property
-    def is_named_volume(self):
-        return self.external and not self.external.startswith(('.', '/', '~'))
-
-
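# Illustrative parses on a POSIX platform (hypothetical paths):
VolumeSpec.parse('/host:/container:ro')
# -> VolumeSpec(external='/host', internal='/container', mode='ro')
VolumeSpec.parse('mydata:/container')  # named volume, default mode 'rw'
VolumeSpec.parse('/container')         # -> external=None, mode 'rw'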
-class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
-
-    @classmethod
-    def parse(cls, link_spec):
-        target, _, alias = link_spec.partition(':')
-        if not alias:
-            alias = target
-        return cls(target, alias)
-
-    def repr(self):
-        if self.target == self.alias:
-            return self.target
-        return '{s.target}:{s.alias}'.format(s=self)
-
-    @property
-    def merge_field(self):
-        return self.alias
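# Illustrative parses (hypothetical names):
ServiceLink.parse('db:database')  # -> ServiceLink(target='db', alias='database')
ServiceLink.parse('db')           # -> alias defaults to the target: 'db'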
diff --git a/env2/lib/python2.7/site-packages/compose/config/validation.py b/env2/lib/python2.7/site-packages/compose/config/validation.py
deleted file mode 100644
index 7452e98..0000000
--- a/env2/lib/python2.7/site-packages/compose/config/validation.py
+++ /dev/null
@@ -1,421 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import json
-import logging
-import os
-import re
-import sys
-
-import six
-from docker.utils.ports import split_port
-from jsonschema import Draft4Validator
-from jsonschema import FormatChecker
-from jsonschema import RefResolver
-from jsonschema import ValidationError
-
-from ..const import COMPOSEFILE_V1 as V1
-from .errors import ConfigurationError
-from .errors import VERSION_EXPLANATION
-from .sort_services import get_service_name_from_network_mode
-
-
-log = logging.getLogger(__name__)
-
-
-DOCKER_CONFIG_HINTS = {
-    'cpu_share': 'cpu_shares',
-    'add_host': 'extra_hosts',
-    'hosts': 'extra_hosts',
-    'extra_host': 'extra_hosts',
-    'device': 'devices',
-    'link': 'links',
-    'memory_swap': 'memswap_limit',
-    'port': 'ports',
-    'privilege': 'privileged',
-    'priviliged': 'privileged',
-    'privilige': 'privileged',
-    'volume': 'volumes',
-    'workdir': 'working_dir',
-}
-
-
-VALID_NAME_CHARS = '[a-zA-Z0-9\._\-]'
-VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
-
-
-@FormatChecker.cls_checks(format="ports", raises=ValidationError)
-def format_ports(instance):
-    try:
-        split_port(instance)
-    except ValueError as e:
-        raise ValidationError(six.text_type(e))
-    return True
-
-
-@FormatChecker.cls_checks(format="expose", raises=ValidationError)
-def format_expose(instance):
-    if isinstance(instance, six.string_types):
-        if not re.match(VALID_EXPOSE_FORMAT, instance):
-            raise ValidationError(
-                "should be of the format 'PORT[/PROTOCOL]'")
-
-    return True
-
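The "expose" check above reduces to a single regular expression; a standalone sketch of what it accepts and rejects (the port values are arbitrary examples):

    import re

    VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'

    assert re.match(VALID_EXPOSE_FORMAT, '8000')             # single port
    assert re.match(VALID_EXPOSE_FORMAT, '8000-8010/udp')    # range plus protocol
    assert not re.match(VALID_EXPOSE_FORMAT, '8000:80')      # host:container mapping is not 'expose'
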
-
-def match_named_volumes(service_dict, project_volumes):
-    service_volumes = service_dict.get('volumes', [])
-    for volume_spec in service_volumes:
-        if volume_spec.is_named_volume and volume_spec.external not in project_volumes:
-            raise ConfigurationError(
-                'Named volume "{0}" is used in service "{1}" but no'
-                ' declaration was found in the volumes section.'.format(
-                    volume_spec.repr(), service_dict.get('name')
-                )
-            )
-
-
-def python_type_to_yaml_type(type_):
-    type_name = type(type_).__name__
-    return {
-        'dict': 'mapping',
-        'list': 'array',
-        'int': 'number',
-        'float': 'number',
-        'bool': 'boolean',
-        'unicode': 'string',
-        'str': 'string',
-        'bytes': 'string',
-    }.get(type_name, type_name)
-
-
-def validate_config_section(filename, config, section):
-    """Validate the structure of a configuration section. This must be done
-    before interpolation so it's separate from schema validation.
-    """
-    if not isinstance(config, dict):
-        raise ConfigurationError(
-            "In file '{filename}', {section} must be a mapping, not "
-            "{type}.".format(
-                filename=filename,
-                section=section,
-                type=anglicize_json_type(python_type_to_yaml_type(config))))
-
-    for key, value in config.items():
-        if not isinstance(key, six.string_types):
-            raise ConfigurationError(
-                "In file '{filename}', the {section} name {name} must be a "
-                "quoted string, i.e. '{name}'.".format(
-                    filename=filename,
-                    section=section,
-                    name=key))
-
-        if not isinstance(value, (dict, type(None))):
-            raise ConfigurationError(
-                "In file '{filename}', {section} '{name}' must be a mapping not "
-                "{type}.".format(
-                    filename=filename,
-                    section=section,
-                    name=key,
-                    type=anglicize_json_type(python_type_to_yaml_type(value))))
-
-
-def validate_top_level_object(config_file):
-    if not isinstance(config_file.config, dict):
-        raise ConfigurationError(
-            "Top level object in '{}' needs to be an object not '{}'.".format(
-                config_file.filename,
-                type(config_file.config)))
-
-
-def validate_ulimits(service_config):
-    ulimit_config = service_config.config.get('ulimits', {})
-    for limit_name, soft_hard_values in six.iteritems(ulimit_config):
-        if isinstance(soft_hard_values, dict):
-            if not soft_hard_values['soft'] <= soft_hard_values['hard']:
-                raise ConfigurationError(
-                    "Service '{s.name}' has invalid ulimit '{ulimit}'. "
-                    "'soft' value can not be greater than 'hard' value ".format(
-                        s=service_config,
-                        ulimit=ulimit_config))
-
-
-def validate_extends_file_path(service_name, extends_options, filename):
-    """
-    The service to be extended must either be defined in the config key 'file',
-    or within 'filename'.
-    """
-    error_prefix = "Invalid 'extends' configuration for %s:" % service_name
-
-    if 'file' not in extends_options and filename is None:
-        raise ConfigurationError(
-            "%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
-        )
-
-
-def validate_network_mode(service_config, service_names):
-    network_mode = service_config.config.get('network_mode')
-    if not network_mode:
-        return
-
-    if 'networks' in service_config.config:
-        raise ConfigurationError("'network_mode' and 'networks' cannot be combined")
-
-    dependency = get_service_name_from_network_mode(network_mode)
-    if not dependency:
-        return
-
-    if dependency not in service_names:
-        raise ConfigurationError(
-            "Service '{s.name}' uses the network stack of service '{dep}' which "
-            "is undefined.".format(s=service_config, dep=dependency))
-
-
-def validate_links(service_config, service_names):
-    for link in service_config.config.get('links', []):
-        if link.split(':')[0] not in service_names:
-            raise ConfigurationError(
-                "Service '{s.name}' has a link to service '{link}' which is "
-                "undefined.".format(s=service_config, link=link))
-
-
-def validate_depends_on(service_config, service_names):
-    for dependency in service_config.config.get('depends_on', []):
-        if dependency not in service_names:
-            raise ConfigurationError(
-                "Service '{s.name}' depends on service '{dep}' which is "
-                "undefined.".format(s=service_config, dep=dependency))
-
-
-def get_unsupported_config_msg(path, error_key):
-    msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
-    if error_key in DOCKER_CONFIG_HINTS:
-        msg += " (did you mean '{}'?)".format(DOCKER_CONFIG_HINTS[error_key])
-    return msg
-
-
-def anglicize_json_type(json_type):
-    if json_type.startswith(('a', 'e', 'i', 'o', 'u')):
-        return 'an ' + json_type
-    return 'a ' + json_type
-
-
-def is_service_dict_schema(schema_id):
-    return schema_id in ('config_schema_v1.json', '#/properties/services')
-
-
-def handle_error_for_schema_with_id(error, path):
-    schema_id = error.schema['id']
-
-    if is_service_dict_schema(schema_id) and error.validator == 'additionalProperties':
-        return "Invalid service name '{}' - only {} characters are allowed".format(
-            # The service_name is the key to the json object
-            list(error.instance)[0],
-            VALID_NAME_CHARS)
-
-    if error.validator == 'additionalProperties':
-        if schema_id == '#/definitions/service':
-            invalid_config_key = parse_key_from_error_msg(error)
-            return get_unsupported_config_msg(path, invalid_config_key)
-
-        if not error.path:
-            return '{}\n\n{}'.format(error.message, VERSION_EXPLANATION)
-
-
-def handle_generic_error(error, path):
-    msg_format = None
-    error_msg = error.message
-
-    if error.validator == 'oneOf':
-        msg_format = "{path} {msg}"
-        config_key, error_msg = _parse_oneof_validator(error)
-        if config_key:
-            path.append(config_key)
-
-    elif error.validator == 'type':
-        msg_format = "{path} contains an invalid type, it should be {msg}"
-        error_msg = _parse_valid_types_from_validator(error.validator_value)
-
-    elif error.validator == 'required':
-        error_msg = ", ".join(error.validator_value)
-        msg_format = "{path} is invalid, {msg} is required."
-
-    elif error.validator == 'dependencies':
-        config_key = list(error.validator_value.keys())[0]
-        required_keys = ",".join(error.validator_value[config_key])
-
-        msg_format = "{path} is invalid: {msg}"
-        path.append(config_key)
-        error_msg = "when defining '{}' you must set '{}' as well".format(
-            config_key,
-            required_keys)
-
-    elif error.cause:
-        error_msg = six.text_type(error.cause)
-        msg_format = "{path} is invalid: {msg}"
-
-    elif error.path:
-        msg_format = "{path} value {msg}"
-
-    if msg_format:
-        return msg_format.format(path=path_string(path), msg=error_msg)
-
-    return error.message
-
-
-def parse_key_from_error_msg(error):
-    return error.message.split("'")[1]
-
-
-def path_string(path):
-    return ".".join(c for c in path if isinstance(c, six.string_types))
-
-
-def _parse_valid_types_from_validator(validator):
-    """A validator value can be either an array of valid types or a string of
-    a valid type. Parse the valid types and prefix with the correct article.
-    """
-    if not isinstance(validator, list):
-        return anglicize_json_type(validator)
-
-    if len(validator) == 1:
-        return anglicize_json_type(validator[0])
-
-    return "{}, or {}".format(
-        ", ".join([anglicize_json_type(validator[0])] + validator[1:-1]),
-        anglicize_json_type(validator[-1]))
-
-
-def _parse_oneof_validator(error):
-    """oneOf has multiple schemas, so we need to reason about which schema, sub
-    schema or constraint the validation is failing on.
-    Inspecting the context value of a ValidationError gives us information about
-    which sub schema failed and which kind of error it is.
-    """
-    types = []
-    for context in error.context:
-
-        if context.validator == 'oneOf':
-            _, error_msg = _parse_oneof_validator(context)
-            return path_string(context.path), error_msg
-
-        if context.validator == 'required':
-            return (None, context.message)
-
-        if context.validator == 'additionalProperties':
-            invalid_config_key = parse_key_from_error_msg(context)
-            return (None, "contains unsupported option: '{}'".format(invalid_config_key))
-
-        if context.path:
-            return (
-                path_string(context.path),
-                "contains {}, which is an invalid type, it should be {}".format(
-                    json.dumps(context.instance),
-                    _parse_valid_types_from_validator(context.validator_value)),
-            )
-
-        if context.validator == 'uniqueItems':
-            return (
-                None,
-                "contains non unique items, please remove duplicates from {}".format(
-                    context.instance),
-            )
-
-        if context.validator == 'type':
-            types.append(context.validator_value)
-
-    valid_types = _parse_valid_types_from_validator(types)
-    return (None, "contains an invalid type, it should be {}".format(valid_types))
-
-
-def process_service_constraint_errors(error, service_name, version):
-    if version == V1:
-        if 'image' in error.instance and 'build' in error.instance:
-            return (
-                "Service {} has both an image and build path specified. "
-                "A service can either be built to image or use an existing "
-                "image, not both.".format(service_name))
-
-        if 'image' in error.instance and 'dockerfile' in error.instance:
-            return (
-                "Service {} has both an image and alternate Dockerfile. "
-                "A service can either be built to image or use an existing "
-                "image, not both.".format(service_name))
-
-    if 'image' not in error.instance and 'build' not in error.instance:
-        return (
-            "Service {} has neither an image nor a build context specified. "
-            "At least one must be provided.".format(service_name))
-
-
-def process_config_schema_errors(error):
-    path = list(error.path)
-
-    if 'id' in error.schema:
-        error_msg = handle_error_for_schema_with_id(error, path)
-        if error_msg:
-            return error_msg
-
-    return handle_generic_error(error, path)
-
-
-def validate_against_config_schema(config_file):
-    schema = load_jsonschema(config_file.version)
-    format_checker = FormatChecker(["ports", "expose"])
-    validator = Draft4Validator(
-        schema,
-        resolver=RefResolver(get_resolver_path(), schema),
-        format_checker=format_checker)
-    handle_errors(
-        validator.iter_errors(config_file.config),
-        process_config_schema_errors,
-        config_file.filename)
-
-
-def validate_service_constraints(config, service_name, version):
-    def handler(errors):
-        return process_service_constraint_errors(errors, service_name, version)
-
-    schema = load_jsonschema(version)
-    validator = Draft4Validator(schema['definitions']['constraints']['service'])
-    handle_errors(validator.iter_errors(config), handler, None)
-
-
-def get_schema_path():
-    return os.path.dirname(os.path.abspath(__file__))
-
-
-def load_jsonschema(version):
-    filename = os.path.join(
-        get_schema_path(),
-        "config_schema_v{0}.json".format(version))
-
-    with open(filename, "r") as fh:
-        return json.load(fh)
-
-
-def get_resolver_path():
-    schema_path = get_schema_path()
-    if sys.platform == "win32":
-        scheme = "///"
-        # TODO: why is this necessary?
-        schema_path = schema_path.replace('\\', '/')
-    else:
-        scheme = "//"
-    return "file:{}{}/".format(scheme, schema_path)
-
-
-def handle_errors(errors, format_error_func, filename):
-    """jsonschema returns an error tree full of information to explain what has
-    gone wrong. Process each error and pull out relevant information and re-write
-    helpful error messages that are relevant.
-    """
-    errors = list(sorted(errors, key=str))
-    if not errors:
-        return
-
-    error_msg = '\n'.join(format_error_func(error) for error in errors)
-    raise ConfigurationError(
-        "The Compose file{file_msg} is invalid because:\n{error_msg}".format(
-            file_msg=" '{}'".format(filename) if filename else "",
-            error_msg=error_msg))
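A quick sketch of how the helpers above turn JSON Schema type names into readable fragments of an error message (the inputs are illustrative; _parse_valid_types_from_validator is a private helper of this module):

    from compose.config.validation import (
        anglicize_json_type,
        _parse_valid_types_from_validator,
    )

    assert anglicize_json_type('array') == 'an array'
    assert anglicize_json_type('string') == 'a string'

    # One valid type, or a choice between two:
    assert _parse_valid_types_from_validator('boolean') == 'a boolean'
    assert _parse_valid_types_from_validator(['string', 'boolean']) == 'a string, or a boolean'
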
diff --git a/env2/lib/python2.7/site-packages/compose/const.py b/env2/lib/python2.7/site-packages/compose/const.py
deleted file mode 100644
index b930e0b..0000000
--- a/env2/lib/python2.7/site-packages/compose/const.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import sys
-
-DEFAULT_TIMEOUT = 10
-HTTP_TIMEOUT = 60
-IMAGE_EVENTS = ['delete', 'import', 'pull', 'push', 'tag', 'untag']
-IS_WINDOWS_PLATFORM = (sys.platform == "win32")
-LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
-LABEL_ONE_OFF = 'com.docker.compose.oneoff'
-LABEL_PROJECT = 'com.docker.compose.project'
-LABEL_SERVICE = 'com.docker.compose.service'
-LABEL_VERSION = 'com.docker.compose.version'
-LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
-
-COMPOSEFILE_V1 = '1'
-COMPOSEFILE_V2_0 = '2.0'
-
-API_VERSIONS = {
-    COMPOSEFILE_V1: '1.21',
-    COMPOSEFILE_V2_0: '1.22',
-}
-
-API_VERSION_TO_ENGINE_VERSION = {
-    API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
-    API_VERSIONS[COMPOSEFILE_V2_0]: '1.10.0'
-}
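These constants chain a Compose file version to a Docker API version and then to a minimum Engine version; for instance (a minimal sketch):

    from compose.const import (
        API_VERSIONS,
        API_VERSION_TO_ENGINE_VERSION,
        COMPOSEFILE_V2_0,
    )

    api = API_VERSIONS[COMPOSEFILE_V2_0]            # '1.22'
    engine = API_VERSION_TO_ENGINE_VERSION[api]     # '1.10.0'
    print('v2 files need API %s (Engine >= %s)' % (api, engine))
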
diff --git a/env2/lib/python2.7/site-packages/compose/container.py b/env2/lib/python2.7/site-packages/compose/container.py
deleted file mode 100644
index 2c16863..0000000
--- a/env2/lib/python2.7/site-packages/compose/container.py
+++ /dev/null
@@ -1,272 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from functools import reduce
-
-import six
-
-from .const import LABEL_CONTAINER_NUMBER
-from .const import LABEL_PROJECT
-from .const import LABEL_SERVICE
-
-
-class Container(object):
-    """
-    Represents a Docker container, constructed from the output of
-    GET /containers/:id/json.
-    """
-    def __init__(self, client, dictionary, has_been_inspected=False):
-        self.client = client
-        self.dictionary = dictionary
-        self.has_been_inspected = has_been_inspected
-        self.log_stream = None
-
-    @classmethod
-    def from_ps(cls, client, dictionary, **kwargs):
-        """
-        Construct a container object from the output of GET /containers/json.
-        """
-        name = get_container_name(dictionary)
-        if name is None:
-            return None
-
-        new_dictionary = {
-            'Id': dictionary['Id'],
-            'Image': dictionary['Image'],
-            'Name': '/' + name,
-        }
-        return cls(client, new_dictionary, **kwargs)
-
-    @classmethod
-    def from_id(cls, client, id):
-        return cls(client, client.inspect_container(id), has_been_inspected=True)
-
-    @classmethod
-    def create(cls, client, **options):
-        response = client.create_container(**options)
-        return cls.from_id(client, response['Id'])
-
-    @property
-    def id(self):
-        return self.dictionary['Id']
-
-    @property
-    def image(self):
-        return self.dictionary['Image']
-
-    @property
-    def image_config(self):
-        return self.client.inspect_image(self.image)
-
-    @property
-    def short_id(self):
-        return self.id[:12]
-
-    @property
-    def name(self):
-        return self.dictionary['Name'][1:]
-
-    @property
-    def service(self):
-        return self.labels.get(LABEL_SERVICE)
-
-    @property
-    def name_without_project(self):
-        project = self.labels.get(LABEL_PROJECT)
-
-        if self.name.startswith('{0}_{1}'.format(project, self.service)):
-            return '{0}_{1}'.format(self.service, self.number)
-        else:
-            return self.name
-
-    @property
-    def number(self):
-        number = self.labels.get(LABEL_CONTAINER_NUMBER)
-        if not number:
-            raise ValueError("Container {0} does not have a {1} label".format(
-                self.short_id, LABEL_CONTAINER_NUMBER))
-        return int(number)
-
-    @property
-    def ports(self):
-        self.inspect_if_not_inspected()
-        return self.get('NetworkSettings.Ports') or {}
-
-    @property
-    def human_readable_ports(self):
-        def format_port(private, public):
-            if not public:
-                return private
-            return '{HostIp}:{HostPort}->{private}'.format(
-                private=private, **public[0])
-
-        return ', '.join(format_port(*item)
-                         for item in sorted(six.iteritems(self.ports)))
-
-    @property
-    def labels(self):
-        return self.get('Config.Labels') or {}
-
-    @property
-    def stop_signal(self):
-        return self.get('Config.StopSignal')
-
-    @property
-    def log_config(self):
-        return self.get('HostConfig.LogConfig') or None
-
-    @property
-    def human_readable_state(self):
-        if self.is_paused:
-            return 'Paused'
-        if self.is_restarting:
-            return 'Restarting'
-        if self.is_running:
-            return 'Ghost' if self.get('State.Ghost') else 'Up'
-        else:
-            return 'Exit %s' % self.get('State.ExitCode')
-
-    @property
-    def human_readable_command(self):
-        entrypoint = self.get('Config.Entrypoint') or []
-        cmd = self.get('Config.Cmd') or []
-        return ' '.join(entrypoint + cmd)
-
-    @property
-    def environment(self):
-        def parse_env(var):
-            if '=' in var:
-                return var.split("=", 1)
-            return var, None
-        return dict(parse_env(var) for var in self.get('Config.Env') or [])
-
-    @property
-    def exit_code(self):
-        return self.get('State.ExitCode')
-
-    @property
-    def is_running(self):
-        return self.get('State.Running')
-
-    @property
-    def is_restarting(self):
-        return self.get('State.Restarting')
-
-    @property
-    def is_paused(self):
-        return self.get('State.Paused')
-
-    @property
-    def log_driver(self):
-        return self.get('HostConfig.LogConfig.Type')
-
-    @property
-    def has_api_logs(self):
-        log_type = self.log_driver
-        return not log_type or log_type != 'none'
-
-    def attach_log_stream(self):
-        """A log stream can only be attached if the container uses a json-file
-        log driver.
-        """
-        if self.has_api_logs:
-            self.log_stream = self.attach(stdout=True, stderr=True, stream=True)
-
-    def get(self, key):
-        """Return a value from the container or None if the value is not set.
-
-        :param key: a string using dotted notation for nested dictionary
-                    lookups
-        """
-        self.inspect_if_not_inspected()
-
-        def get_value(dictionary, key):
-            return (dictionary or {}).get(key)
-
-        return reduce(get_value, key.split('.'), self.dictionary)
-
-    def get_local_port(self, port, protocol='tcp'):
-        port = self.ports.get("%s/%s" % (port, protocol))
-        return "{HostIp}:{HostPort}".format(**port[0]) if port else None
-
-    def get_mount(self, mount_dest):
-        for mount in self.get('Mounts'):
-            if mount['Destination'] == mount_dest:
-                return mount
-        return None
-
-    def start(self, **options):
-        return self.client.start(self.id, **options)
-
-    def stop(self, **options):
-        return self.client.stop(self.id, **options)
-
-    def pause(self, **options):
-        return self.client.pause(self.id, **options)
-
-    def unpause(self, **options):
-        return self.client.unpause(self.id, **options)
-
-    def kill(self, **options):
-        return self.client.kill(self.id, **options)
-
-    def restart(self, **options):
-        return self.client.restart(self.id, **options)
-
-    def remove(self, **options):
-        return self.client.remove_container(self.id, **options)
-
-    def create_exec(self, command, **options):
-        return self.client.exec_create(self.id, command, **options)
-
-    def start_exec(self, exec_id, **options):
-        return self.client.exec_start(exec_id, **options)
-
-    def rename_to_tmp_name(self):
-        """Rename the container to a hopefully unique temporary container name
-        by prepending the short id.
-        """
-        self.client.rename(
-            self.id,
-            '%s_%s' % (self.short_id, self.name)
-        )
-
-    def inspect_if_not_inspected(self):
-        if not self.has_been_inspected:
-            self.inspect()
-
-    def wait(self):
-        return self.client.wait(self.id)
-
-    def logs(self, *args, **kwargs):
-        return self.client.logs(self.id, *args, **kwargs)
-
-    def inspect(self):
-        self.dictionary = self.client.inspect_container(self.id)
-        self.has_been_inspected = True
-        return self.dictionary
-
-    def attach(self, *args, **kwargs):
-        return self.client.attach(self.id, *args, **kwargs)
-
-    def __repr__(self):
-        return '<Container: %s (%s)>' % (self.name, self.id[:6])
-
-    def __eq__(self, other):
-        if type(self) != type(other):
-            return False
-        return self.id == other.id
-
-    def __hash__(self):
-        return self.id.__hash__()
-
-
-def get_container_name(container):
-    if not container.get('Name') and not container.get('Names'):
-        return None
-    # inspect
-    if 'Name' in container:
-        return container['Name']
-    # ps
-    shortest_name = min(container['Names'], key=lambda n: len(n.split('/')))
-    return shortest_name.split('/')[-1]
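Container.get resolves dotted keys against the inspect dictionary with a reduce; a self-contained sketch of that lookup (the sample inspect data is invented):

    from functools import reduce

    def get_value(dictionary, key):
        return (dictionary or {}).get(key)

    inspect_data = {'State': {'Running': True}, 'Config': {'Labels': {}}}
    assert reduce(get_value, 'State.Running'.split('.'), inspect_data) is True

    # Missing intermediate keys collapse to None instead of raising:
    assert reduce(get_value, 'State.Ghost.Deep'.split('.'), inspect_data) is None
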
diff --git a/env2/lib/python2.7/site-packages/compose/errors.py b/env2/lib/python2.7/site-packages/compose/errors.py
deleted file mode 100644
index 9f68760..0000000
--- a/env2/lib/python2.7/site-packages/compose/errors.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-
-class OperationFailedError(Exception):
-    def __init__(self, reason):
-        self.msg = reason
diff --git a/env2/lib/python2.7/site-packages/compose/network.py b/env2/lib/python2.7/site-packages/compose/network.py
deleted file mode 100644
index affba7c..0000000
--- a/env2/lib/python2.7/site-packages/compose/network.py
+++ /dev/null
@@ -1,190 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-
-from docker.errors import NotFound
-from docker.utils import create_ipam_config
-from docker.utils import create_ipam_pool
-
-from .config import ConfigurationError
-
-
-log = logging.getLogger(__name__)
-
-
-class Network(object):
-    def __init__(self, client, project, name, driver=None, driver_opts=None,
-                 ipam=None, external_name=None):
-        self.client = client
-        self.project = project
-        self.name = name
-        self.driver = driver
-        self.driver_opts = driver_opts
-        self.ipam = create_ipam_config_from_dict(ipam)
-        self.external_name = external_name
-
-    def ensure(self):
-        if self.external_name:
-            try:
-                self.inspect()
-                log.debug(
-                    'Network {0} declared as external. No new '
-                    'network will be created.'.format(self.name)
-                )
-            except NotFound:
-                raise ConfigurationError(
-                    'Network {name} declared as external, but could'
-                    ' not be found. Please create the network manually'
-                    ' using `{command} {name}` and try again.'.format(
-                        name=self.external_name,
-                        command='docker network create'
-                    )
-                )
-            return
-
-        try:
-            data = self.inspect()
-            if self.driver and data['Driver'] != self.driver:
-                raise ConfigurationError(
-                    'Network "{}" needs to be recreated - driver has changed'
-                    .format(self.full_name))
-            if data['Options'] != (self.driver_opts or {}):
-                raise ConfigurationError(
-                    'Network "{}" needs to be recreated - options have changed'
-                    .format(self.full_name))
-        except NotFound:
-            driver_name = 'the default driver'
-            if self.driver:
-                driver_name = 'driver "{}"'.format(self.driver)
-
-            log.info(
-                'Creating network "{}" with {}'
-                .format(self.full_name, driver_name)
-            )
-
-            self.client.create_network(
-                name=self.full_name,
-                driver=self.driver,
-                options=self.driver_opts,
-                ipam=self.ipam,
-            )
-
-    def remove(self):
-        if self.external_name:
-            log.info("Network %s is external, skipping", self.full_name)
-            return
-
-        log.info("Removing network {}".format(self.full_name))
-        self.client.remove_network(self.full_name)
-
-    def inspect(self):
-        return self.client.inspect_network(self.full_name)
-
-    @property
-    def full_name(self):
-        if self.external_name:
-            return self.external_name
-        return '{0}_{1}'.format(self.project, self.name)
-
-
-def create_ipam_config_from_dict(ipam_dict):
-    if not ipam_dict:
-        return None
-
-    return create_ipam_config(
-        driver=ipam_dict.get('driver'),
-        pool_configs=[
-            create_ipam_pool(
-                subnet=config.get('subnet'),
-                iprange=config.get('ip_range'),
-                gateway=config.get('gateway'),
-                aux_addresses=config.get('aux_addresses'),
-            )
-            for config in ipam_dict.get('config', [])
-        ],
-    )
-
-
-def build_networks(name, config_data, client):
-    network_config = config_data.networks or {}
-    networks = {
-        network_name: Network(
-            client=client, project=name, name=network_name,
-            driver=data.get('driver'),
-            driver_opts=data.get('driver_opts'),
-            ipam=data.get('ipam'),
-            external_name=data.get('external_name'),
-        )
-        for network_name, data in network_config.items()
-    }
-
-    if 'default' not in networks:
-        networks['default'] = Network(client, name, 'default')
-
-    return networks
-
-
-class ProjectNetworks(object):
-
-    def __init__(self, networks, use_networking):
-        self.networks = networks or {}
-        self.use_networking = use_networking
-
-    @classmethod
-    def from_services(cls, services, networks, use_networking):
-        service_networks = {
-            network: networks.get(network)
-            for service in services
-            for network in get_network_names_for_service(service)
-        }
-        unused = set(networks) - set(service_networks) - {'default'}
-        if unused:
-            log.warn(
-                "Some networks were defined but are not used by any service: "
-                "{}".format(", ".join(unused)))
-        return cls(service_networks, use_networking)
-
-    def remove(self):
-        if not self.use_networking:
-            return
-        for network in self.networks.values():
-            try:
-                network.remove()
-            except NotFound:
-                log.warn("Network %s not found.", network.full_name)
-
-    def initialize(self):
-        if not self.use_networking:
-            return
-
-        for network in self.networks.values():
-            network.ensure()
-
-
-def get_network_defs_for_service(service_dict):
-    if 'network_mode' in service_dict:
-        return {}
-    networks = service_dict.get('networks', {'default': None})
-    return dict(
-        (net, (config or {}))
-        for net, config in networks.items()
-    )
-
-
-def get_network_names_for_service(service_dict):
-    return get_network_defs_for_service(service_dict).keys()
-
-
-def get_networks(service_dict, network_definitions):
-    networks = {}
-    for name, netdef in get_network_defs_for_service(service_dict).items():
-        network = network_definitions.get(name)
-        if network:
-            networks[network.full_name] = netdef
-        else:
-            raise ConfigurationError(
-                'Service "{}" uses an undefined network "{}"'
-                .format(service_dict['name'], name))
-
-    return networks
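A sketch of how a service dictionary maps onto network definitions with the helpers above (the service dicts are made up):

    from compose.network import get_network_defs_for_service

    # 'network_mode' short-circuits to no Compose-managed networks:
    assert get_network_defs_for_service({'network_mode': 'host'}) == {}

    # Otherwise the service joins 'default', or whatever it declares:
    assert get_network_defs_for_service({}) == {'default': {}}
    assert get_network_defs_for_service(
        {'networks': {'front': None, 'back': {'aliases': ['b']}}}
    ) == {'front': {}, 'back': {'aliases': ['b']}}
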
diff --git a/env2/lib/python2.7/site-packages/compose/parallel.py b/env2/lib/python2.7/site-packages/compose/parallel.py
deleted file mode 100644
index 7ac66b3..0000000
--- a/env2/lib/python2.7/site-packages/compose/parallel.py
+++ /dev/null
@@ -1,254 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-import operator
-import sys
-from threading import Thread
-
-from docker.errors import APIError
-from six.moves import _thread as thread
-from six.moves.queue import Empty
-from six.moves.queue import Queue
-
-from compose.cli.signals import ShutdownException
-from compose.errors import OperationFailedError
-from compose.utils import get_output_stream
-
-
-log = logging.getLogger(__name__)
-
-STOP = object()
-
-
-def parallel_execute(objects, func, get_name, msg, get_deps=None):
-    """Runs func on objects in parallel while ensuring that func is
-    ran on object only after it is ran on all its dependencies.
-
-    get_deps called on object must return a collection with its dependencies.
-    get_name called on object must return its name.
-    """
-    objects = list(objects)
-    stream = get_output_stream(sys.stderr)
-
-    writer = ParallelStreamWriter(stream, msg)
-    for obj in objects:
-        writer.initialize(get_name(obj))
-
-    events = parallel_execute_iter(objects, func, get_deps)
-
-    errors = {}
-    results = []
-    error_to_reraise = None
-
-    for obj, result, exception in events:
-        if exception is None:
-            writer.write(get_name(obj), 'done')
-            results.append(result)
-        elif isinstance(exception, APIError):
-            errors[get_name(obj)] = exception.explanation
-            writer.write(get_name(obj), 'error')
-        elif isinstance(exception, OperationFailedError):
-            errors[get_name(obj)] = exception.msg
-            writer.write(get_name(obj), 'error')
-        elif isinstance(exception, UpstreamError):
-            writer.write(get_name(obj), 'error')
-        else:
-            errors[get_name(obj)] = exception
-            error_to_reraise = exception
-
-    for obj_name, error in errors.items():
-        stream.write("\nERROR: for {}  {}\n".format(obj_name, error))
-
-    if error_to_reraise:
-        raise error_to_reraise
-
-    return results, errors
-
-
-def _no_deps(x):
-    return []
-
-
-class State(object):
-    """
-    Holds the state of a partially-complete parallel operation.
-
-    state.started:   objects being processed
-    state.finished:  objects which have been processed
-    state.failed:    objects which either failed or whose dependencies failed
-    """
-    def __init__(self, objects):
-        self.objects = objects
-
-        self.started = set()
-        self.finished = set()
-        self.failed = set()
-
-    def is_done(self):
-        return len(self.finished) + len(self.failed) >= len(self.objects)
-
-    def pending(self):
-        return set(self.objects) - self.started - self.finished - self.failed
-
-
-def parallel_execute_iter(objects, func, get_deps):
-    """
-    Runs func on objects in parallel, ensuring that func is run on an
-    object only after it has been run on all of its dependencies.
-
-    Returns an iterator of tuples which look like:
-
-    # if func returned normally when run on object
-    (object, result, None)
-
-    # if func raised an exception when run on object
-    (object, None, exception)
-
-    # if func raised an exception when run on one of object's dependencies
-    (object, None, UpstreamError())
-    """
-    if get_deps is None:
-        get_deps = _no_deps
-
-    results = Queue()
-    state = State(objects)
-
-    while True:
-        feed_queue(objects, func, get_deps, results, state)
-
-        try:
-            event = results.get(timeout=0.1)
-        except Empty:
-            continue
-        # See https://github.com/docker/compose/issues/189
-        except thread.error:
-            raise ShutdownException()
-
-        if event is STOP:
-            break
-
-        obj, _, exception = event
-        if exception is None:
-            log.debug('Finished processing: {}'.format(obj))
-            state.finished.add(obj)
-        else:
-            log.debug('Failed: {}'.format(obj))
-            state.failed.add(obj)
-
-        yield event
-
-
-def producer(obj, func, results):
-    """
-    The entry point for a producer thread which runs func on a single object.
-    Places a tuple on the results queue once func has either returned or raised.
-    """
-    try:
-        result = func(obj)
-        results.put((obj, result, None))
-    except Exception as e:
-        results.put((obj, None, e))
-
-
-def feed_queue(objects, func, get_deps, results, state):
-    """
-    Starts producer threads for any objects that are ready to be processed
-    (i.e. all of their dependencies have already been processed successfully).
-
-    Shortcuts any objects whose dependencies have failed and places an
-    (object, None, UpstreamError()) tuple on the results queue.
-    """
-    pending = state.pending()
-    log.debug('Pending: {}'.format(pending))
-
-    for obj in pending:
-        deps = get_deps(obj)
-
-        if any(dep in state.failed for dep in deps):
-            log.debug('{} has upstream errors - not processing'.format(obj))
-            results.put((obj, None, UpstreamError()))
-            state.failed.add(obj)
-        elif all(
-            dep not in objects or dep in state.finished
-            for dep in deps
-        ):
-            log.debug('Starting producer thread for {}'.format(obj))
-            t = Thread(target=producer, args=(obj, func, results))
-            t.daemon = True
-            t.start()
-            state.started.add(obj)
-
-    if state.is_done():
-        results.put(STOP)
-
-
-class UpstreamError(Exception):
-    pass
-
-
-class ParallelStreamWriter(object):
-    """Write out messages for operations happening in parallel.
-
-    Each operation has its own line, and ANSI escape codes are used to jump
-    to the correct line and write over it.
-    """
-
-    def __init__(self, stream, msg):
-        self.stream = stream
-        self.msg = msg
-        self.lines = []
-
-    def initialize(self, obj_index):
-        if self.msg is None:
-            return
-        self.lines.append(obj_index)
-        self.stream.write("{} {} ... \r\n".format(self.msg, obj_index))
-        self.stream.flush()
-
-    def write(self, obj_index, status):
-        if self.msg is None:
-            return
-        position = self.lines.index(obj_index)
-        diff = len(self.lines) - position
-        # move up
-        self.stream.write("%c[%dA" % (27, diff))
-        # erase
-        self.stream.write("%c[2K\r" % 27)
-        self.stream.write("{} {} ... {}\r".format(self.msg, obj_index, status))
-        # move back down
-        self.stream.write("%c[%dB" % (27, diff))
-        self.stream.flush()
-
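The cursor movement in write() is plain ANSI: ESC[nA moves up n rows, ESC[2K erases the current line, and ESC[nB moves back down. A bare-bones equivalent of one in-place status update (the status text is arbitrary):

    import sys

    ESC = '%c' % 27
    diff = 2                                    # rows above the cursor
    sys.stderr.write(ESC + '[%dA' % diff)       # move up
    sys.stderr.write(ESC + '[2K\r')             # erase the line
    sys.stderr.write('Creating web ... done\r')
    sys.stderr.write(ESC + '[%dB' % diff)       # move back down
    sys.stderr.flush()
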
-
-def parallel_operation(containers, operation, options, message):
-    parallel_execute(
-        containers,
-        operator.methodcaller(operation, **options),
-        operator.attrgetter('name'),
-        message)
-
-
-def parallel_remove(containers, options):
-    stopped_containers = [c for c in containers if not c.is_running]
-    parallel_operation(stopped_containers, 'remove', options, 'Removing')
-
-
-def parallel_start(containers, options):
-    parallel_operation(containers, 'start', options, 'Starting')
-
-
-def parallel_pause(containers, options):
-    parallel_operation(containers, 'pause', options, 'Pausing')
-
-
-def parallel_unpause(containers, options):
-    parallel_operation(containers, 'unpause', options, 'Unpausing')
-
-
-def parallel_kill(containers, options):
-    parallel_operation(containers, 'kill', options, 'Killing')
-
-
-def parallel_restart(containers, options):
-    parallel_operation(containers, 'restart', options, 'Restarting')
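A minimal, hypothetical use of parallel_execute_iter with a two-node dependency graph (the names and dependency map are invented; assumes this vendored compose package is importable):

    from compose.parallel import parallel_execute_iter

    deps = {'db': [], 'web': ['db']}

    # 'web' is only scheduled once 'db' has finished successfully.
    for obj, result, exc in parallel_execute_iter(
            ['db', 'web'], lambda name: name.upper(), deps.get):
        print(obj, result, exc)   # db DB None, then web WEB None
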
diff --git a/env2/lib/python2.7/site-packages/compose/progress_stream.py b/env2/lib/python2.7/site-packages/compose/progress_stream.py
deleted file mode 100644
index a0f5601..0000000
--- a/env2/lib/python2.7/site-packages/compose/progress_stream.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-from compose import utils
-
-
-class StreamOutputError(Exception):
-    pass
-
-
-def stream_output(output, stream):
-    is_terminal = hasattr(stream, 'isatty') and stream.isatty()
-    stream = utils.get_output_stream(stream)
-    all_events = []
-    lines = {}
-    diff = 0
-
-    for event in utils.json_stream(output):
-        all_events.append(event)
-        is_progress_event = 'progress' in event or 'progressDetail' in event
-
-        if not is_progress_event:
-            print_output_event(event, stream, is_terminal)
-            stream.flush()
-            continue
-
-        if not is_terminal:
-            continue
-
-        # if it's a progress event and we have a terminal, then display the progress bars
-        image_id = event.get('id')
-        if not image_id:
-            continue
-
-        if image_id in lines:
-            diff = len(lines) - lines[image_id]
-        else:
-            lines[image_id] = len(lines)
-            stream.write("\n")
-            diff = 0
-
-        # move cursor up `diff` rows
-        stream.write("%c[%dA" % (27, diff))
-
-        print_output_event(event, stream, is_terminal)
-
-        if 'id' in event:
-            # move cursor back down
-            stream.write("%c[%dB" % (27, diff))
-
-        stream.flush()
-
-    return all_events
-
-
-def print_output_event(event, stream, is_terminal):
-    if 'errorDetail' in event:
-        raise StreamOutputError(event['errorDetail']['message'])
-
-    terminator = ''
-
-    if is_terminal and 'stream' not in event:
-        # erase current line
-        stream.write("%c[2K\r" % 27)
-        terminator = "\r"
-    elif 'progressDetail' in event:
-        return
-
-    if 'time' in event:
-        stream.write("[%s] " % event['time'])
-
-    if 'id' in event:
-        stream.write("%s: " % event['id'])
-
-    if 'from' in event:
-        stream.write("(from %s) " % event['from'])
-
-    status = event.get('status', '')
-
-    if 'progress' in event:
-        stream.write("%s %s%s" % (status, event['progress'], terminator))
-    elif 'progressDetail' in event:
-        detail = event['progressDetail']
-        total = detail.get('total')
-        if 'current' in detail and total:
-            percentage = float(detail['current']) / float(total) * 100
-            stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
-        else:
-            stream.write('%s%s' % (status, terminator))
-    elif 'stream' in event:
-        stream.write("%s%s" % (event['stream'], terminator))
-    else:
-        stream.write("%s%s\n" % (status, terminator))
-
-
-def get_digest_from_pull(events):
-    for event in events:
-        status = event.get('status')
-        if not status or 'Digest' not in status:
-            continue
-
-        _, digest = status.split(':', 1)
-        return digest.strip()
-    return None
-
-
-def get_digest_from_push(events):
-    for event in events:
-        digest = event.get('aux', {}).get('Digest')
-        if digest:
-            return digest
-    return None
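A sketch of digest extraction from a pull event stream (the events and digest are fabricated):

    from compose.progress_stream import get_digest_from_pull

    events = [
        {'status': 'Pulling from library/redis'},
        {'status': 'Digest: sha256:0123abcd'},
        {'status': 'Status: Image is up to date'},
    ]
    assert get_digest_from_pull(events) == 'sha256:0123abcd'
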
diff --git a/env2/lib/python2.7/site-packages/compose/project.py b/env2/lib/python2.7/site-packages/compose/project.py
deleted file mode 100644
index f85e285..0000000
--- a/env2/lib/python2.7/site-packages/compose/project.py
+++ /dev/null
@@ -1,563 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import datetime
-import logging
-import operator
-from functools import reduce
-
-import enum
-from docker.errors import APIError
-
-from . import parallel
-from .config import ConfigurationError
-from .config.config import V1
-from .config.sort_services import get_container_name_from_network_mode
-from .config.sort_services import get_service_name_from_network_mode
-from .const import DEFAULT_TIMEOUT
-from .const import IMAGE_EVENTS
-from .const import LABEL_ONE_OFF
-from .const import LABEL_PROJECT
-from .const import LABEL_SERVICE
-from .container import Container
-from .network import build_networks
-from .network import get_networks
-from .network import ProjectNetworks
-from .service import BuildAction
-from .service import ContainerNetworkMode
-from .service import ConvergenceStrategy
-from .service import NetworkMode
-from .service import Service
-from .service import ServiceNetworkMode
-from .utils import microseconds_from_time_nano
-from .volume import ProjectVolumes
-
-
-log = logging.getLogger(__name__)
-
-
-@enum.unique
-class OneOffFilter(enum.Enum):
-    include = 0
-    exclude = 1
-    only = 2
-
-    @classmethod
-    def update_labels(cls, value, labels):
-        if value == cls.only:
-            labels.append('{0}={1}'.format(LABEL_ONE_OFF, "True"))
-        elif value == cls.exclude:
-            labels.append('{0}={1}'.format(LABEL_ONE_OFF, "False"))
-        elif value == cls.include:
-            pass
-        else:
-            raise ValueError("Invalid value for one_off: {}".format(repr(value)))
-
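The filter translates into Docker label filters like this (a small sketch; the project label value is made up):

    from compose.project import OneOffFilter

    labels = ['com.docker.compose.project=myproj']
    OneOffFilter.update_labels(OneOffFilter.exclude, labels)
    assert labels[-1] == 'com.docker.compose.oneoff=False'
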
-
-class Project(object):
-    """
-    A collection of services.
-    """
-    def __init__(self, name, services, client, networks=None, volumes=None):
-        self.name = name
-        self.services = services
-        self.client = client
-        self.volumes = volumes or ProjectVolumes({})
-        self.networks = networks or ProjectNetworks({}, False)
-
-    def labels(self, one_off=OneOffFilter.exclude):
-        labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
-
-        OneOffFilter.update_labels(one_off, labels)
-        return labels
-
-    @classmethod
-    def from_config(cls, name, config_data, client):
-        """
-        Construct a Project from a config.Config object.
-        """
-        use_networking = (config_data.version and config_data.version != V1)
-        networks = build_networks(name, config_data, client)
-        project_networks = ProjectNetworks.from_services(
-            config_data.services,
-            networks,
-            use_networking)
-        volumes = ProjectVolumes.from_config(name, config_data, client)
-        project = cls(name, [], client, project_networks, volumes)
-
-        for service_dict in config_data.services:
-            service_dict = dict(service_dict)
-            if use_networking:
-                service_networks = get_networks(service_dict, networks)
-            else:
-                service_networks = {}
-
-            service_dict.pop('networks', None)
-            links = project.get_links(service_dict)
-            network_mode = project.get_network_mode(
-                service_dict, list(service_networks.keys())
-            )
-            volumes_from = get_volumes_from(project, service_dict)
-
-            if config_data.version != V1:
-                service_dict['volumes'] = [
-                    volumes.namespace_spec(volume_spec)
-                    for volume_spec in service_dict.get('volumes', [])
-                ]
-
-            project.services.append(
-                Service(
-                    service_dict.pop('name'),
-                    client=client,
-                    project=name,
-                    use_networking=use_networking,
-                    networks=service_networks,
-                    links=links,
-                    network_mode=network_mode,
-                    volumes_from=volumes_from,
-                    **service_dict)
-            )
-
-        return project
-
-    @property
-    def service_names(self):
-        return [service.name for service in self.services]
-
-    def get_service(self, name):
-        """
-        Retrieve a service by name. Raises NoSuchService
-        if the named service does not exist.
-        """
-        for service in self.services:
-            if service.name == name:
-                return service
-
-        raise NoSuchService(name)
-
-    def validate_service_names(self, service_names):
-        """
-        Validate that the given list of service names only contains valid
-        services. Raises NoSuchService if one of the names is invalid.
-        """
-        valid_names = self.service_names
-        for name in service_names:
-            if name not in valid_names:
-                raise NoSuchService(name)
-
-    def get_services(self, service_names=None, include_deps=False):
-        """
-        Returns a list of this project's services filtered
-        by the provided list of names, or all services if service_names is None
-        or [].
-
-        If include_deps is specified, returns a list including the dependencies for
-        service_names, in order of dependency.
-
-        Preserves the original order of self.services where possible,
-        reordering as needed to resolve dependencies.
-
-        Raises NoSuchService if any of the named services do not exist.
-        """
-        if service_names is None or len(service_names) == 0:
-            service_names = self.service_names
-
-        unsorted = [self.get_service(name) for name in service_names]
-        services = [s for s in self.services if s in unsorted]
-
-        if include_deps:
-            services = reduce(self._inject_deps, services, [])
-
-        uniques = []
-        [uniques.append(s) for s in services if s not in uniques]
-
-        return uniques
-
-    def get_services_without_duplicate(self, service_names=None, include_deps=False):
-        services = self.get_services(service_names, include_deps)
-        for service in services:
-            service.remove_duplicate_containers()
-        return services
-
-    def get_links(self, service_dict):
-        links = []
-        if 'links' in service_dict:
-            for link in service_dict.get('links', []):
-                if ':' in link:
-                    service_name, link_name = link.split(':', 1)
-                else:
-                    service_name, link_name = link, None
-                try:
-                    links.append((self.get_service(service_name), link_name))
-                except NoSuchService:
-                    raise ConfigurationError(
-                        'Service "%s" has a link to service "%s" which does not '
-                        'exist.' % (service_dict['name'], service_name))
-            del service_dict['links']
-        return links
-
-    def get_network_mode(self, service_dict, networks):
-        network_mode = service_dict.pop('network_mode', None)
-        if not network_mode:
-            if self.networks.use_networking:
-                return NetworkMode(networks[0]) if networks else NetworkMode('none')
-            return NetworkMode(None)
-
-        service_name = get_service_name_from_network_mode(network_mode)
-        if service_name:
-            return ServiceNetworkMode(self.get_service(service_name))
-
-        container_name = get_container_name_from_network_mode(network_mode)
-        if container_name:
-            try:
-                return ContainerNetworkMode(Container.from_id(self.client, container_name))
-            except APIError:
-                raise ConfigurationError(
-                    "Service '{name}' uses the network stack of container '{dep}' which "
-                    "does not exist.".format(name=service_dict['name'], dep=container_name))
-
-        return NetworkMode(network_mode)
-
-    def start(self, service_names=None, **options):
-        containers = []
-
-        def start_service(service):
-            service_containers = service.start(quiet=True, **options)
-            containers.extend(service_containers)
-
-        services = self.get_services(service_names)
-
-        def get_deps(service):
-            return {self.get_service(dep) for dep in service.get_dependency_names()}
-
-        parallel.parallel_execute(
-            services,
-            start_service,
-            operator.attrgetter('name'),
-            'Starting',
-            get_deps)
-
-        return containers
-
-    def stop(self, service_names=None, one_off=OneOffFilter.exclude, **options):
-        containers = self.containers(service_names, one_off=one_off)
-
-        def get_deps(container):
-            # actually returns the inverse dependencies
-            return {other for other in containers
-                    if container.service in
-                    self.get_service(other.service).get_dependency_names()}
-
-        parallel.parallel_execute(
-            containers,
-            operator.methodcaller('stop', **options),
-            operator.attrgetter('name'),
-            'Stopping',
-            get_deps)
-
-    def pause(self, service_names=None, **options):
-        containers = self.containers(service_names)
-        parallel.parallel_pause(reversed(containers), options)
-        return containers
-
-    def unpause(self, service_names=None, **options):
-        containers = self.containers(service_names)
-        parallel.parallel_unpause(containers, options)
-        return containers
-
-    def kill(self, service_names=None, **options):
-        parallel.parallel_kill(self.containers(service_names), options)
-
-    def remove_stopped(self, service_names=None, one_off=OneOffFilter.exclude, **options):
-        parallel.parallel_remove(self.containers(
-            service_names, stopped=True, one_off=one_off
-        ), options)
-
-    def down(self, remove_image_type, include_volumes, remove_orphans=False):
-        self.stop(one_off=OneOffFilter.include)
-        self.find_orphan_containers(remove_orphans)
-        self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
-
-        self.networks.remove()
-
-        if include_volumes:
-            self.volumes.remove()
-
-        self.remove_images(remove_image_type)
-
-    def remove_images(self, remove_image_type):
-        for service in self.get_services():
-            service.remove_image(remove_image_type)
-
-    def restart(self, service_names=None, **options):
-        containers = self.containers(service_names, stopped=True)
-        parallel.parallel_restart(containers, options)
-        return containers
-
-    def build(self, service_names=None, no_cache=False, pull=False, force_rm=False):
-        for service in self.get_services(service_names):
-            if service.can_be_built():
-                service.build(no_cache, pull, force_rm)
-            else:
-                log.info('%s uses an image, skipping' % service.name)
-
-    def create(
-        self,
-        service_names=None,
-        strategy=ConvergenceStrategy.changed,
-        do_build=BuildAction.none,
-    ):
-        services = self.get_services_without_duplicate(service_names, include_deps=True)
-
-        for svc in services:
-            svc.ensure_image_exists(do_build=do_build)
-        plans = self._get_convergence_plans(services, strategy)
-
-        for service in services:
-            service.execute_convergence_plan(
-                plans[service.name],
-                detached=True,
-                start=False)
-
-    def events(self, service_names=None):
-        def build_container_event(event, container):
-            time = datetime.datetime.fromtimestamp(event['time'])
-            time = time.replace(
-                microsecond=microseconds_from_time_nano(event['timeNano']))
-            return {
-                'time': time,
-                'type': 'container',
-                'action': event['status'],
-                'id': container.id,
-                'service': container.service,
-                'attributes': {
-                    'name': container.name,
-                    'image': event['from'],
-                },
-                'container': container,
-            }
-
-        service_names = set(service_names or self.service_names)
-        for event in self.client.events(
-            filters={'label': self.labels()},
-            decode=True
-        ):
-            # The first part of this condition is a guard against some events
-            # broadcast by swarm that don't have a status field.
-            # See https://github.com/docker/compose/issues/3316
-            if 'status' not in event or event['status'] in IMAGE_EVENTS:
-                # We don't receive any image events because labels aren't applied
-                # to images
-                continue
-
-            # TODO: get labels from the API v1.22, see github issue 2618
-            try:
-                # this can fail if the container has been removed
-                container = Container.from_id(self.client, event['id'])
-            except APIError:
-                continue
-            if container.service not in service_names:
-                continue
-            yield build_container_event(event, container)
-
-    def up(self,
-           service_names=None,
-           start_deps=True,
-           strategy=ConvergenceStrategy.changed,
-           do_build=BuildAction.none,
-           timeout=DEFAULT_TIMEOUT,
-           detached=False,
-           remove_orphans=False):
-
-        warn_for_swarm_mode(self.client)
-
-        self.initialize()
-        self.find_orphan_containers(remove_orphans)
-
-        services = self.get_services_without_duplicate(
-            service_names,
-            include_deps=start_deps)
-
-        for svc in services:
-            svc.ensure_image_exists(do_build=do_build)
-        plans = self._get_convergence_plans(services, strategy)
-
-        def do(service):
-            return service.execute_convergence_plan(
-                plans[service.name],
-                timeout=timeout,
-                detached=detached
-            )
-
-        def get_deps(service):
-            return {self.get_service(dep) for dep in service.get_dependency_names()}
-
-        results, errors = parallel.parallel_execute(
-            services,
-            do,
-            operator.attrgetter('name'),
-            None,
-            get_deps
-        )
-        if errors:
-            raise ProjectError(
-                'Encountered errors while bringing up the project.'
-            )
-
-        return [
-            container
-            for svc_containers in results
-            if svc_containers is not None
-            for container in svc_containers
-        ]
-
-    def initialize(self):
-        self.networks.initialize()
-        self.volumes.initialize()
-
-    def _get_convergence_plans(self, services, strategy):
-        plans = {}
-
-        for service in services:
-            updated_dependencies = [
-                name
-                for name in service.get_dependency_names()
-                if name in plans and
-                plans[name].action in ('recreate', 'create')
-            ]
-
-            if updated_dependencies and strategy.allows_recreate:
-                log.debug('%s has upstream changes (%s)',
-                          service.name,
-                          ", ".join(updated_dependencies))
-                plan = service.convergence_plan(ConvergenceStrategy.always)
-            else:
-                plan = service.convergence_plan(strategy)
-
-            plans[service.name] = plan
-
-        return plans
-
-    def pull(self, service_names=None, ignore_pull_failures=False):
-        for service in self.get_services(service_names, include_deps=False):
-            service.pull(ignore_pull_failures)
-
-    def push(self, service_names=None, ignore_push_failures=False):
-        for service in self.get_services(service_names, include_deps=False):
-            service.push(ignore_push_failures)
-
-    def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
-        return list(filter(None, [
-            Container.from_ps(self.client, container)
-            for container in self.client.containers(
-                all=stopped,
-                filters={'label': self.labels(one_off=one_off)})])
-        )
-
-    def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude):
-        if service_names:
-            self.validate_service_names(service_names)
-        else:
-            service_names = self.service_names
-
-        containers = self._labeled_containers(stopped, one_off)
-
-        def matches_service_names(container):
-            return container.labels.get(LABEL_SERVICE) in service_names
-
-        return [c for c in containers if matches_service_names(c)]
-
-    def find_orphan_containers(self, remove_orphans):
-        def _find():
-            containers = self._labeled_containers()
-            for ctnr in containers:
-                service_name = ctnr.labels.get(LABEL_SERVICE)
-                if service_name not in self.service_names:
-                    yield ctnr
-        orphans = list(_find())
-        if not orphans:
-            return
-        if remove_orphans:
-            for ctnr in orphans:
-                log.info('Removing orphan container "{0}"'.format(ctnr.name))
-                ctnr.kill()
-                ctnr.remove(force=True)
-        else:
-            log.warning(
-                'Found orphan containers ({0}) for this project. If '
-                'you removed or renamed this service in your compose '
-                'file, you can run this command with the '
-                '--remove-orphans flag to clean it up.'.format(
-                    ', '.join(["{}".format(ctnr.name) for ctnr in orphans])
-                )
-            )
-
-    def _inject_deps(self, acc, service):
-        dep_names = service.get_dependency_names()
-
-        if len(dep_names) > 0:
-            dep_services = self.get_services(
-                service_names=list(set(dep_names)),
-                include_deps=True
-            )
-        else:
-            dep_services = []
-
-        dep_services.append(service)
-        return acc + dep_services
-
-
-def get_volumes_from(project, service_dict):
-    volumes_from = service_dict.pop('volumes_from', None)
-    if not volumes_from:
-        return []
-
-    def build_volume_from(spec):
-        if spec.type == 'service':
-            try:
-                return spec._replace(source=project.get_service(spec.source))
-            except NoSuchService:
-                pass
-
-        if spec.type == 'container':
-            try:
-                container = Container.from_id(project.client, spec.source)
-                return spec._replace(source=container)
-            except APIError:
-                pass
-
-        raise ConfigurationError(
-            "Service \"{}\" mounts volumes from \"{}\", which is not the name "
-            "of a service or container.".format(
-                service_dict['name'],
-                spec.source))
-
-    return [build_volume_from(vf) for vf in volumes_from]
-
-
-def warn_for_swarm_mode(client):
-    info = client.info()
-    if info.get('Swarm', {}).get('LocalNodeState') == 'active':
-        log.warn(
-            "The Docker Engine you're using is running in swarm mode.\n\n"
-            "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
-            "All containers will be scheduled on the current node.\n\n"
-            "To deploy your application across the swarm, "
-            "use the bundle feature of the Docker experimental build.\n\n"
-            "More info:\n"
-            "https://docs.docker.com/compose/bundles\n"
-        )
-
-
-class NoSuchService(Exception):
-    def __init__(self, name):
-        self.name = name
-        self.msg = "No such service: %s" % self.name
-
-    def __str__(self):
-        return self.msg
-
-
-class ProjectError(Exception):
-    def __init__(self, msg):
-        self.msg = msg
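
For context on the Project.events() generator deleted above: it yields one dict
per container lifecycle event, shaped by build_container_event(). A minimal
consumption sketch, assuming `project` is an already-constructed Project bound
to a Docker client and that a 'web' service exists:

    # Hypothetical usage of Project.events(); prints one compact line per
    # container event for the assumed 'web' service.
    for event in project.events(service_names=['web']):
        print('%s %s %s (%s)' % (
            event['time'].isoformat(),
            event['service'],
            event['action'],
            event['id'][:12],
        ))
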
diff --git a/env2/lib/python2.7/site-packages/compose/service.py b/env2/lib/python2.7/site-packages/compose/service.py
deleted file mode 100644
index 7bb36cd..0000000
--- a/env2/lib/python2.7/site-packages/compose/service.py
+++ /dev/null
@@ -1,1122 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-import re
-import sys
-from collections import namedtuple
-from operator import attrgetter
-
-import enum
-import six
-from docker.errors import APIError
-from docker.utils import LogConfig
-from docker.utils.ports import build_port_bindings
-from docker.utils.ports import split_port
-
-from . import __version__
-from . import progress_stream
-from .config import DOCKER_CONFIG_KEYS
-from .config import merge_environment
-from .config.types import VolumeSpec
-from .const import DEFAULT_TIMEOUT
-from .const import LABEL_CONFIG_HASH
-from .const import LABEL_CONTAINER_NUMBER
-from .const import LABEL_ONE_OFF
-from .const import LABEL_PROJECT
-from .const import LABEL_SERVICE
-from .const import LABEL_VERSION
-from .container import Container
-from .errors import OperationFailedError
-from .parallel import parallel_execute
-from .parallel import parallel_start
-from .progress_stream import stream_output
-from .progress_stream import StreamOutputError
-from .utils import json_hash
-
-
-log = logging.getLogger(__name__)
-
-
-DOCKER_START_KEYS = [
-    'cap_add',
-    'cap_drop',
-    'cgroup_parent',
-    'cpu_quota',
-    'devices',
-    'dns',
-    'dns_search',
-    'env_file',
-    'extra_hosts',
-    'ipc',
-    'read_only',
-    'log_driver',
-    'log_opt',
-    'mem_limit',
-    'memswap_limit',
-    'pid',
-    'privileged',
-    'restart',
-    'security_opt',
-    'shm_size',
-    'volumes_from',
-]
-
-
-class BuildError(Exception):
-    def __init__(self, service, reason):
-        self.service = service
-        self.reason = reason
-
-
-class NeedsBuildError(Exception):
-    def __init__(self, service):
-        self.service = service
-
-
-class NoSuchImageError(Exception):
-    pass
-
-
-ServiceName = namedtuple('ServiceName', 'project service number')
-
-
-ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
-
-
-@enum.unique
-class ConvergenceStrategy(enum.Enum):
-    """Enumeration for all possible convergence strategies. Values refer to
-    when containers should be recreated.
-    """
-    changed = 1
-    always = 2
-    never = 3
-
-    @property
-    def allows_recreate(self):
-        return self is not type(self).never
-
-
-@enum.unique
-class ImageType(enum.Enum):
-    """Enumeration for the types of images known to compose."""
-    none = 0
-    local = 1
-    all = 2
-
-
-@enum.unique
-class BuildAction(enum.Enum):
-    """Enumeration for the possible build actions."""
-    none = 0
-    force = 1
-    skip = 2
-
-
-class Service(object):
-    def __init__(
-        self,
-        name,
-        client=None,
-        project='default',
-        use_networking=False,
-        links=None,
-        volumes_from=None,
-        network_mode=None,
-        networks=None,
-        **options
-    ):
-        self.name = name
-        self.client = client
-        self.project = project
-        self.use_networking = use_networking
-        self.links = links or []
-        self.volumes_from = volumes_from or []
-        self.network_mode = network_mode or NetworkMode(None)
-        self.networks = networks or {}
-        self.options = options
-
-    def __repr__(self):
-        return '<Service: {}>'.format(self.name)
-
-    def containers(self, stopped=False, one_off=False, filters=None):
-        filters = dict(filters or {}, label=self.labels(one_off=one_off))
-
-        return list(filter(None, [
-            Container.from_ps(self.client, container)
-            for container in self.client.containers(
-                all=stopped,
-                filters=filters)]))
-
-    def get_container(self, number=1):
-        """Return a :class:`compose.container.Container` for this service. The
-        container must be active and match `number`.
-        """
-        labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
-        for container in self.client.containers(filters={'label': labels}):
-            return Container.from_ps(self.client, container)
-
-        raise ValueError("No container found for %s_%s" % (self.name, number))
-
-    def start(self, **options):
-        containers = self.containers(stopped=True)
-        for c in containers:
-            self.start_container_if_stopped(c, **options)
-        return containers
-
-    def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):
-        """
-        Adjusts the number of containers to the specified number and ensures
-        they are running.
-
-        - creates containers until there are at least `desired_num`
-        - stops containers until there are at most `desired_num` running
-        - starts containers until there are at least `desired_num` running
-        - removes all stopped containers
-        """
-        if self.custom_container_name and desired_num > 1:
-            log.warn('The "%s" service is using the custom container name "%s". '
-                     'Docker requires each container to have a unique name. '
-                     'Remove the custom name to scale the service.'
-                     % (self.name, self.custom_container_name))
-
-        if self.specifies_host_port() and desired_num > 1:
-            log.warn('The "%s" service specifies a port on the host. If multiple containers '
-                     'for this service are created on a single host, the port will clash.'
-                     % self.name)
-
-        def create_and_start(service, number):
-            container = service.create_container(number=number, quiet=True)
-            service.start_container(container)
-            return container
-
-        def stop_and_remove(container):
-            container.stop(timeout=timeout)
-            container.remove()
-
-        running_containers = self.containers(stopped=False)
-        num_running = len(running_containers)
-
-        if desired_num == num_running:
-            # do nothing as we already have the desired number
-            log.info('Desired container number already achieved')
-            return
-
-        if desired_num > num_running:
-            # we need to start/create until we have desired_num
-            all_containers = self.containers(stopped=True)
-
-            if num_running != len(all_containers):
-                # we have some stopped containers, let's start them up again
-                stopped_containers = sorted(
-                    (c for c in all_containers if not c.is_running),
-                    key=attrgetter('number'))
-
-                num_stopped = len(stopped_containers)
-
-                if num_stopped + num_running > desired_num:
-                    num_to_start = desired_num - num_running
-                    containers_to_start = stopped_containers[:num_to_start]
-                else:
-                    containers_to_start = stopped_containers
-
-                parallel_start(containers_to_start, {})
-
-                num_running += len(containers_to_start)
-
-            num_to_create = desired_num - num_running
-            next_number = self._next_container_number()
-            container_numbers = [
-                number for number in range(
-                    next_number, next_number + num_to_create
-                )
-            ]
-
-            parallel_execute(
-                container_numbers,
-                lambda n: create_and_start(service=self, number=n),
-                lambda n: self.get_container_name(n),
-                "Creating and starting"
-            )
-
-        if desired_num < num_running:
-            num_to_stop = num_running - desired_num
-
-            sorted_running_containers = sorted(
-                running_containers,
-                key=attrgetter('number'))
-
-            parallel_execute(
-                sorted_running_containers[-num_to_stop:],
-                stop_and_remove,
-                lambda c: c.name,
-                "Stopping and removing",
-            )
-
-    def create_container(self,
-                         one_off=False,
-                         previous_container=None,
-                         number=None,
-                         quiet=False,
-                         **override_options):
-        """
-        Create a container for this service. If the image doesn't exist, attempt to pull
-        it.
-        """
-        # This is only necessary for `scale` and `volumes_from`
-        # auto-creating containers to satisfy the dependency.
-        self.ensure_image_exists()
-
-        container_options = self._get_container_create_options(
-            override_options,
-            number or self._next_container_number(one_off=one_off),
-            one_off=one_off,
-            previous_container=previous_container,
-        )
-
-        if 'name' in container_options and not quiet:
-            log.info("Creating %s" % container_options['name'])
-
-        try:
-            return Container.create(self.client, **container_options)
-        except APIError as ex:
-            raise OperationFailedError("Cannot create container for service %s: %s" %
-                                       (self.name, ex.explanation))
-
-    def ensure_image_exists(self, do_build=BuildAction.none):
-        if self.can_be_built() and do_build == BuildAction.force:
-            self.build()
-            return
-
-        try:
-            self.image()
-            return
-        except NoSuchImageError:
-            pass
-
-        if not self.can_be_built():
-            self.pull()
-            return
-
-        if do_build == BuildAction.skip:
-            raise NeedsBuildError(self)
-
-        self.build()
-        log.warn(
-            "Image for service {} was built because it did not already exist. To "
-            "rebuild this image you must use `docker-compose build` or "
-            "`docker-compose up --build`.".format(self.name))
-
-    def image(self):
-        try:
-            return self.client.inspect_image(self.image_name)
-        except APIError as e:
-            if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
-                raise NoSuchImageError("Image '{}' not found".format(self.image_name))
-            else:
-                raise
-
-    @property
-    def image_name(self):
-        return self.options.get('image', '{s.project}_{s.name}'.format(s=self))
-
-    def convergence_plan(self, strategy=ConvergenceStrategy.changed):
-        containers = self.containers(stopped=True)
-
-        if not containers:
-            return ConvergencePlan('create', [])
-
-        if strategy is ConvergenceStrategy.never:
-            return ConvergencePlan('start', containers)
-
-        if (
-            strategy is ConvergenceStrategy.always or
-            self._containers_have_diverged(containers)
-        ):
-            return ConvergencePlan('recreate', containers)
-
-        stopped = [c for c in containers if not c.is_running]
-
-        if stopped:
-            return ConvergencePlan('start', stopped)
-
-        return ConvergencePlan('noop', containers)
-
-    def _containers_have_diverged(self, containers):
-        config_hash = None
-
-        try:
-            config_hash = self.config_hash
-        except NoSuchImageError as e:
-            log.debug(
-                'Service %s has diverged: %s',
-                self.name, six.text_type(e),
-            )
-            return True
-
-        has_diverged = False
-
-        for c in containers:
-            container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
-            if container_config_hash != config_hash:
-                log.debug(
-                    '%s has diverged: %s != %s',
-                    c.name, container_config_hash, config_hash,
-                )
-                has_diverged = True
-
-        return has_diverged
-
-    def execute_convergence_plan(self,
-                                 plan,
-                                 timeout=DEFAULT_TIMEOUT,
-                                 detached=False,
-                                 start=True):
-        (action, containers) = plan
-        should_attach_logs = not detached
-
-        if action == 'create':
-            container = self.create_container()
-
-            if should_attach_logs:
-                container.attach_log_stream()
-
-            if start:
-                self.start_container(container)
-
-            return [container]
-
-        elif action == 'recreate':
-            return [
-                self.recreate_container(
-                    container,
-                    timeout=timeout,
-                    attach_logs=should_attach_logs,
-                    start_new_container=start
-                )
-                for container in containers
-            ]
-
-        elif action == 'start':
-            if start:
-                for container in containers:
-                    self.start_container_if_stopped(container, attach_logs=should_attach_logs)
-
-            return containers
-
-        elif action == 'noop':
-            for c in containers:
-                log.info("%s is up-to-date" % c.name)
-
-            return containers
-
-        else:
-            raise Exception("Invalid action: {}".format(action))
-
-    def recreate_container(
-            self,
-            container,
-            timeout=DEFAULT_TIMEOUT,
-            attach_logs=False,
-            start_new_container=True):
-        """Recreate a container.
-
-        The original container is renamed to a temporary name so that data
-        volumes can be copied to the new container before the original
-        container is removed.
-        """
-        log.info("Recreating %s" % container.name)
-
-        container.stop(timeout=timeout)
-        container.rename_to_tmp_name()
-        new_container = self.create_container(
-            previous_container=container,
-            number=container.labels.get(LABEL_CONTAINER_NUMBER),
-            quiet=True,
-        )
-        if attach_logs:
-            new_container.attach_log_stream()
-        if start_new_container:
-            self.start_container(new_container)
-        container.remove()
-        return new_container
-
-    def start_container_if_stopped(self, container, attach_logs=False, quiet=False):
-        if not container.is_running:
-            if not quiet:
-                log.info("Starting %s" % container.name)
-            if attach_logs:
-                container.attach_log_stream()
-            return self.start_container(container)
-
-    def start_container(self, container):
-        self.connect_container_to_networks(container)
-        try:
-            container.start()
-        except APIError as ex:
-            raise OperationFailedError("Cannot start service %s: %s" % (self.name, ex.explanation))
-        return container
-
-    def connect_container_to_networks(self, container):
-        connected_networks = container.get('NetworkSettings.Networks')
-
-        for network, netdefs in self.networks.items():
-            if network in connected_networks:
-                if short_id_alias_exists(container, network):
-                    continue
-
-                self.client.disconnect_container_from_network(
-                    container.id,
-                    network)
-
-            self.client.connect_container_to_network(
-                container.id, network,
-                aliases=self._get_aliases(netdefs, container),
-                ipv4_address=netdefs.get('ipv4_address', None),
-                ipv6_address=netdefs.get('ipv6_address', None),
-                links=self._get_links(False))
-
-    def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
-        for c in self.duplicate_containers():
-            log.info('Removing %s' % c.name)
-            c.stop(timeout=timeout)
-            c.remove()
-
-    def duplicate_containers(self):
-        containers = sorted(
-            self.containers(stopped=True),
-            key=lambda c: c.get('Created'),
-        )
-
-        numbers = set()
-
-        for c in containers:
-            if c.number in numbers:
-                yield c
-            else:
-                numbers.add(c.number)
-
-    @property
-    def config_hash(self):
-        return json_hash(self.config_dict())
-
-    def config_dict(self):
-        return {
-            'options': self.options,
-            'image_id': self.image()['Id'],
-            'links': self.get_link_names(),
-            'net': self.network_mode.id,
-            'networks': self.networks,
-            'volumes_from': [
-                (v.source.name, v.mode)
-                for v in self.volumes_from if isinstance(v.source, Service)
-            ],
-        }
-
-    def get_dependency_names(self):
-        net_name = self.network_mode.service_name
-        return (self.get_linked_service_names() +
-                self.get_volumes_from_names() +
-                ([net_name] if net_name else []) +
-                self.options.get('depends_on', []))
-
-    def get_linked_service_names(self):
-        return [service.name for (service, _) in self.links]
-
-    def get_link_names(self):
-        return [(service.name, alias) for service, alias in self.links]
-
-    def get_volumes_from_names(self):
-        return [s.source.name for s in self.volumes_from if isinstance(s.source, Service)]
-
-    # TODO: this would benefit from github.com/docker/docker/pull/14699
-    # to remove the need to inspect every container
-    def _next_container_number(self, one_off=False):
-        containers = filter(None, [
-            Container.from_ps(self.client, container)
-            for container in self.client.containers(
-                all=True,
-                filters={'label': self.labels(one_off=one_off)})
-        ])
-        numbers = [c.number for c in containers]
-        return 1 if not numbers else max(numbers) + 1
-
-    def _get_aliases(self, network, container=None):
-        if container and container.labels.get(LABEL_ONE_OFF) == "True":
-            return []
-
-        return list(
-            {self.name} |
-            ({container.short_id} if container else set()) |
-            set(network.get('aliases', ()))
-        )
-
-    def build_default_networking_config(self):
-        if not self.networks:
-            return {}
-
-        network = self.networks[self.network_mode.id]
-        endpoint = {
-            'Aliases': self._get_aliases(network),
-            'IPAMConfig': {},
-        }
-
-        if network.get('ipv4_address'):
-            endpoint['IPAMConfig']['IPv4Address'] = network.get('ipv4_address')
-        if network.get('ipv6_address'):
-            endpoint['IPAMConfig']['IPv6Address'] = network.get('ipv6_address')
-
-        return {"EndpointsConfig": {self.network_mode.id: endpoint}}
-
-    def _get_links(self, link_to_self):
-        links = {}
-
-        for service, link_name in self.links:
-            for container in service.containers():
-                links[link_name or service.name] = container.name
-                links[container.name] = container.name
-                links[container.name_without_project] = container.name
-
-        if link_to_self:
-            for container in self.containers():
-                links[self.name] = container.name
-                links[container.name] = container.name
-                links[container.name_without_project] = container.name
-
-        for external_link in self.options.get('external_links') or []:
-            if ':' not in external_link:
-                link_name = external_link
-            else:
-                external_link, link_name = external_link.split(':')
-            links[link_name] = external_link
-
-        return [
-            (alias, container_name)
-            for (container_name, alias) in links.items()
-        ]
-
-    def _get_volumes_from(self):
-        return [build_volume_from(spec) for spec in self.volumes_from]
-
-    def _get_container_create_options(
-            self,
-            override_options,
-            number,
-            one_off=False,
-            previous_container=None):
-        add_config_hash = (not one_off and not override_options)
-
-        container_options = dict(
-            (k, self.options[k])
-            for k in DOCKER_CONFIG_KEYS if k in self.options)
-        container_options.update(override_options)
-
-        if not container_options.get('name'):
-            container_options['name'] = self.get_container_name(number, one_off)
-
-        container_options.setdefault('detach', True)
-
-        # If a qualified hostname was given, split it into an
-        # unqualified hostname and a domainname unless domainname
-        # was also given explicitly. This matches the behavior of
-        # the official Docker CLI in that scenario.
-        if ('hostname' in container_options and
-                'domainname' not in container_options and
-                '.' in container_options['hostname']):
-            parts = container_options['hostname'].partition('.')
-            container_options['hostname'] = parts[0]
-            container_options['domainname'] = parts[2]
-
-        if 'ports' in container_options or 'expose' in self.options:
-            container_options['ports'] = build_container_ports(
-                container_options,
-                self.options)
-
-        container_options['environment'] = merge_environment(
-            self.options.get('environment'),
-            override_options.get('environment'))
-
-        binds, affinity = merge_volume_bindings(
-            container_options.get('volumes') or [],
-            previous_container)
-        override_options['binds'] = binds
-        container_options['environment'].update(affinity)
-
-        if 'volumes' in container_options:
-            container_options['volumes'] = dict(
-                (v.internal, {}) for v in container_options['volumes'])
-
-        container_options['image'] = self.image_name
-
-        container_options['labels'] = build_container_labels(
-            container_options.get('labels', {}),
-            self.labels(one_off=one_off),
-            number,
-            self.config_hash if add_config_hash else None)
-
-        # Delete options which are only used when starting
-        for key in DOCKER_START_KEYS:
-            container_options.pop(key, None)
-
-        container_options['host_config'] = self._get_container_host_config(
-            override_options,
-            one_off=one_off)
-
-        networking_config = self.build_default_networking_config()
-        if networking_config:
-            container_options['networking_config'] = networking_config
-
-        container_options['environment'] = format_environment(
-            container_options['environment'])
-        return container_options
-
-    def _get_container_host_config(self, override_options, one_off=False):
-        options = dict(self.options, **override_options)
-
-        logging_dict = options.get('logging', None)
-        log_config = get_log_config(logging_dict)
-
-        return self.client.create_host_config(
-            links=self._get_links(link_to_self=one_off),
-            port_bindings=build_port_bindings(options.get('ports') or []),
-            binds=options.get('binds'),
-            volumes_from=self._get_volumes_from(),
-            privileged=options.get('privileged', False),
-            network_mode=self.network_mode.mode,
-            devices=options.get('devices'),
-            dns=options.get('dns'),
-            dns_search=options.get('dns_search'),
-            restart_policy=options.get('restart'),
-            cap_add=options.get('cap_add'),
-            cap_drop=options.get('cap_drop'),
-            mem_limit=options.get('mem_limit'),
-            memswap_limit=options.get('memswap_limit'),
-            ulimits=build_ulimits(options.get('ulimits')),
-            log_config=log_config,
-            extra_hosts=options.get('extra_hosts'),
-            read_only=options.get('read_only'),
-            pid_mode=options.get('pid'),
-            security_opt=options.get('security_opt'),
-            ipc_mode=options.get('ipc'),
-            cgroup_parent=options.get('cgroup_parent'),
-            cpu_quota=options.get('cpu_quota'),
-            shm_size=options.get('shm_size'),
-            tmpfs=options.get('tmpfs'),
-        )
-
-    def build(self, no_cache=False, pull=False, force_rm=False):
-        log.info('Building %s' % self.name)
-
-        build_opts = self.options.get('build', {})
-        path = build_opts.get('context')
-        # python2 os.path functions don't support unicode, so we need to
-        # encode the path to a byte string
-        if not six.PY3:
-            path = path.encode('utf8')
-
-        build_output = self.client.build(
-            path=path,
-            tag=self.image_name,
-            stream=True,
-            rm=True,
-            forcerm=force_rm,
-            pull=pull,
-            nocache=no_cache,
-            dockerfile=build_opts.get('dockerfile', None),
-            buildargs=build_opts.get('args', None),
-        )
-
-        try:
-            all_events = stream_output(build_output, sys.stdout)
-        except StreamOutputError as e:
-            raise BuildError(self, six.text_type(e))
-
-        # Ensure the HTTP connection is not reused for another
-        # streaming command, as the Docker daemon can sometimes
-        # complain about it
-        self.client.close()
-
-        image_id = None
-
-        for event in all_events:
-            if 'stream' in event:
-                match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
-                if match:
-                    image_id = match.group(1)
-
-        if image_id is None:
-            raise BuildError(self, event if all_events else 'Unknown')
-
-        return image_id
-
-    def can_be_built(self):
-        return 'build' in self.options
-
-    def labels(self, one_off=False):
-        return [
-            '{0}={1}'.format(LABEL_PROJECT, self.project),
-            '{0}={1}'.format(LABEL_SERVICE, self.name),
-            '{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
-        ]
-
-    @property
-    def custom_container_name(self):
-        return self.options.get('container_name')
-
-    def get_container_name(self, number, one_off=False):
-        if self.custom_container_name and not one_off:
-            return self.custom_container_name
-
-        return build_container_name(self.project, self.name, number, one_off)
-
-    def remove_image(self, image_type):
-        if not image_type or image_type == ImageType.none:
-            return False
-        if image_type == ImageType.local and self.options.get('image'):
-            return False
-
-        log.info("Removing image %s", self.image_name)
-        try:
-            self.client.remove_image(self.image_name)
-            return True
-        except APIError as e:
-            log.error("Failed to remove image for service %s: %s", self.name, e)
-            return False
-
-    def specifies_host_port(self):
-        def has_host_port(binding):
-            _, external_bindings = split_port(binding)
-
-            # there are no external bindings
-            if external_bindings is None:
-                return False
-
-            # we only need to check the first binding from the range
-            external_binding = external_bindings[0]
-
-            # non-tuple binding means there is a host port specified
-            if not isinstance(external_binding, tuple):
-                return True
-
-            # extract actual host port from tuple of (host_ip, host_port)
-            _, host_port = external_binding
-            if host_port is not None:
-                return True
-
-            return False
-
-        return any(has_host_port(binding) for binding in self.options.get('ports', []))
-
-    def pull(self, ignore_pull_failures=False):
-        if 'image' not in self.options:
-            return
-
-        repo, tag, separator = parse_repository_tag(self.options['image'])
-        tag = tag or 'latest'
-        log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
-        output = self.client.pull(repo, tag=tag, stream=True)
-
-        try:
-            return progress_stream.get_digest_from_pull(
-                stream_output(output, sys.stdout))
-        except StreamOutputError as e:
-            if not ignore_pull_failures:
-                raise
-            else:
-                log.error(six.text_type(e))
-
-    def push(self, ignore_push_failures=False):
-        if 'image' not in self.options or 'build' not in self.options:
-            return
-
-        repo, tag, separator = parse_repository_tag(self.options['image'])
-        tag = tag or 'latest'
-        log.info('Pushing %s (%s%s%s)...' % (self.name, repo, separator, tag))
-        output = self.client.push(repo, tag=tag, stream=True)
-
-        try:
-            return progress_stream.get_digest_from_push(
-                stream_output(output, sys.stdout))
-        except StreamOutputError as e:
-            if not ignore_push_failures:
-                raise
-            else:
-                log.error(six.text_type(e))
-
-
-def short_id_alias_exists(container, network):
-    aliases = container.get(
-        'NetworkSettings.Networks.{net}.Aliases'.format(net=network)) or ()
-    return container.short_id in aliases
-
-
-class NetworkMode(object):
-    """A `standard` network mode (ex: host, bridge)"""
-
-    service_name = None
-
-    def __init__(self, network_mode):
-        self.network_mode = network_mode
-
-    @property
-    def id(self):
-        return self.network_mode
-
-    mode = id
-
-
-class ContainerNetworkMode(object):
-    """A network mode that uses a container's network stack."""
-
-    service_name = None
-
-    def __init__(self, container):
-        self.container = container
-
-    @property
-    def id(self):
-        return self.container.id
-
-    @property
-    def mode(self):
-        return 'container:' + self.container.id
-
-
-class ServiceNetworkMode(object):
-    """A network mode that uses a service's network stack."""
-
-    def __init__(self, service):
-        self.service = service
-
-    @property
-    def id(self):
-        return self.service.name
-
-    service_name = id
-
-    @property
-    def mode(self):
-        containers = self.service.containers()
-        if containers:
-            return 'container:' + containers[0].id
-
-        log.warn("Service %s is trying to use reuse the network stack "
-                 "of another service that is not running." % (self.id))
-        return None
-
-
-# Names
-
-
-def build_container_name(project, service, number, one_off=False):
-    bits = [project, service]
-    if one_off:
-        bits.append('run')
-    return '_'.join(bits + [str(number)])
-
-
-# Images
-
-def parse_repository_tag(repo_path):
-    """Splits image identification into base image path, tag/digest
-    and its separator.
-
-    Example:
-
-    >>> parse_repository_tag('user/repo@sha256:digest')
-    ('user/repo', 'sha256:digest', '@')
-    >>> parse_repository_tag('user/repo:v1')
-    ('user/repo', 'v1', ':')
-    """
-    tag_separator = ":"
-    digest_separator = "@"
-
-    if digest_separator in repo_path:
-        repo, tag = repo_path.rsplit(digest_separator, 1)
-        return repo, tag, digest_separator
-
-    repo, tag = repo_path, ""
-    if tag_separator in repo_path:
-        repo, tag = repo_path.rsplit(tag_separator, 1)
-        if "/" in tag:
-            repo, tag = repo_path, ""
-
-    return repo, tag, tag_separator
-
-
-# Volumes
-
-
-def merge_volume_bindings(volumes, previous_container):
-    """Return a list of volume bindings for a container. Container data volumes
-    are replaced by those from the previous container.
-    """
-    affinity = {}
-
-    volume_bindings = dict(
-        build_volume_binding(volume)
-        for volume in volumes
-        if volume.external)
-
-    if previous_container:
-        old_volumes = get_container_data_volumes(previous_container, volumes)
-        warn_on_masked_volume(volumes, old_volumes, previous_container.service)
-        volume_bindings.update(
-            build_volume_binding(volume) for volume in old_volumes)
-
-        if old_volumes:
-            affinity = {'affinity:container': '=' + previous_container.id}
-
-    return list(volume_bindings.values()), affinity
-
-
-def get_container_data_volumes(container, volumes_option):
-    """Find the container data volumes that are in `volumes_option`, and return
-    a mapping of volume bindings for those volumes.
-    """
-    volumes = []
-    volumes_option = volumes_option or []
-
-    container_mounts = dict(
-        (mount['Destination'], mount)
-        for mount in container.get('Mounts') or {}
-    )
-
-    image_volumes = [
-        VolumeSpec.parse(volume)
-        for volume in
-        container.image_config['ContainerConfig'].get('Volumes') or {}
-    ]
-
-    for volume in set(volumes_option + image_volumes):
-        # No need to preserve host volumes
-        if volume.external:
-            continue
-
-        mount = container_mounts.get(volume.internal)
-
-        # New volume, doesn't exist in the old container
-        if not mount:
-            continue
-
-        # Volume was previously a host volume, now it's a container volume
-        if not mount.get('Name'):
-            continue
-
-        # Copy existing volume from old container
-        volume = volume._replace(external=mount['Name'])
-        volumes.append(volume)
-
-    return volumes
-
-
-def warn_on_masked_volume(volumes_option, container_volumes, service):
-    container_volumes = dict(
-        (volume.internal, volume.external)
-        for volume in container_volumes)
-
-    for volume in volumes_option:
-        if (
-            volume.external and
-            volume.internal in container_volumes and
-            container_volumes.get(volume.internal) != volume.external
-        ):
-            log.warn((
-                "Service \"{service}\" is using volume \"{volume}\" from the "
-                "previous container. Host mapping \"{host_path}\" has no effect. "
-                "Remove the existing containers (with `docker-compose rm {service}`) "
-                "to use the host volume mapping."
-            ).format(
-                service=service,
-                volume=volume.internal,
-                host_path=volume.external))
-
-
-def build_volume_binding(volume_spec):
-    return volume_spec.internal, volume_spec.repr()
-
-
-def build_volume_from(volume_from_spec):
-    """
-    volume_from can be either a service or a container. We want to return the
-    container.id and format it into a string complete with the mode.
-    """
-    if isinstance(volume_from_spec.source, Service):
-        containers = volume_from_spec.source.containers(stopped=True)
-        if not containers:
-            return "{}:{}".format(
-                volume_from_spec.source.create_container().id,
-                volume_from_spec.mode)
-
-        container = containers[0]
-        return "{}:{}".format(container.id, volume_from_spec.mode)
-    elif isinstance(volume_from_spec.source, Container):
-        return "{}:{}".format(volume_from_spec.source.id, volume_from_spec.mode)
-
-
-# Labels
-
-
-def build_container_labels(label_options, service_labels, number, config_hash):
-    labels = dict(label_options or {})
-    labels.update(label.split('=', 1) for label in service_labels)
-    labels[LABEL_CONTAINER_NUMBER] = str(number)
-    labels[LABEL_VERSION] = __version__
-
-    if config_hash:
-        log.debug("Added config hash: %s" % config_hash)
-        labels[LABEL_CONFIG_HASH] = config_hash
-
-    return labels
-
-
-# Ulimits
-
-
-def build_ulimits(ulimit_config):
-    if not ulimit_config:
-        return None
-    ulimits = []
-    for limit_name, soft_hard_values in six.iteritems(ulimit_config):
-        if isinstance(soft_hard_values, six.integer_types):
-            ulimits.append({'name': limit_name, 'soft': soft_hard_values, 'hard': soft_hard_values})
-        elif isinstance(soft_hard_values, dict):
-            ulimit_dict = {'name': limit_name}
-            ulimit_dict.update(soft_hard_values)
-            ulimits.append(ulimit_dict)
-
-    return ulimits
-
-
-def get_log_config(logging_dict):
-    log_driver = logging_dict.get('driver', "") if logging_dict else ""
-    log_options = logging_dict.get('options', None) if logging_dict else None
-    return LogConfig(
-        type=log_driver,
-        config=log_options
-    )
-
-
-# TODO: remove once fix is available in docker-py
-def format_environment(environment):
-    def format_env(key, value):
-        if value is None:
-            return key
-        return '{key}={value}'.format(key=key, value=value)
-    return [format_env(*item) for item in environment.items()]
-
-# Ports
-
-
-def build_container_ports(container_options, options):
-    ports = []
-    all_ports = container_options.get('ports', []) + options.get('expose', [])
-    for port_range in all_ports:
-        internal_range, _ = split_port(port_range)
-        for port in internal_range:
-            port = str(port)
-            if '/' in port:
-                port = tuple(port.split('/'))
-            ports.append(port)
-    return ports
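
parse_repository_tag() above documents itself with doctests; build_container_ports()
does not, so here is a small worked example of the expansion it performs. The
values are illustrative and the expected output is inferred from the code above
together with docker-py's split_port():

    # Port ranges are expanded by split_port, 'expose' entries are appended,
    # and a '/proto' suffix becomes a (port, proto) tuple.
    container_options = {'ports': ['8000-8001', '53/udp']}
    options = {'expose': ['9090']}
    build_container_ports(container_options, options)
    # -> ['8000', '8001', ('53', 'udp'), '9090']
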
diff --git a/env2/lib/python2.7/site-packages/compose/state.py b/env2/lib/python2.7/site-packages/compose/state.py
deleted file mode 100644
index e69de29..0000000
--- a/env2/lib/python2.7/site-packages/compose/state.py
+++ /dev/null
diff --git a/env2/lib/python2.7/site-packages/compose/utils.py b/env2/lib/python2.7/site-packages/compose/utils.py
deleted file mode 100644
index 925a8e7..0000000
--- a/env2/lib/python2.7/site-packages/compose/utils.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import codecs
-import hashlib
-import json
-import json.decoder
-
-import six
-
-
-json_decoder = json.JSONDecoder()
-
-
-def get_output_stream(stream):
-    if six.PY3:
-        return stream
-    return codecs.getwriter('utf-8')(stream)
-
-
-def stream_as_text(stream):
-    """Given a stream of bytes or text, if any of the items in the stream
-    are bytes, convert them to text.
-
-    This function can be removed once docker-py returns text streams instead
-    of byte streams.
-    """
-    for data in stream:
-        if not isinstance(data, six.text_type):
-            data = data.decode('utf-8', 'replace')
-        yield data
-
-
-def line_splitter(buffer, separator=u'\n'):
-    index = buffer.find(six.text_type(separator))
-    if index == -1:
-        return None
-    return buffer[:index + 1], buffer[index + 1:]
-
-
-def split_buffer(stream, splitter=None, decoder=lambda a: a):
-    """Given a generator which yields strings and a splitter function,
-    joins all input, splits on the separator and yields each chunk.
-
-    Unlike string.split(), each chunk includes the trailing
-    separator, except for the last chunk if no separator was found
-    at the end of the input.
-    """
-    splitter = splitter or line_splitter
-    buffered = six.text_type('')
-
-    for data in stream_as_text(stream):
-        buffered += data
-        while True:
-            buffer_split = splitter(buffered)
-            if buffer_split is None:
-                break
-
-            item, buffered = buffer_split
-            yield item
-
-    if buffered:
-        yield decoder(buffered)
-
-
-def json_splitter(buffer):
-    """Attempt to parse a json object from a buffer. If there is at least one
-    object, return it and the rest of the buffer, otherwise return None.
-    """
-    try:
-        obj, index = json_decoder.raw_decode(buffer)
-        rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
-        return obj, rest
-    except ValueError:
-        return None
-
-
-def json_stream(stream):
-    """Given a stream of text, return a stream of json objects.
-    This handles streams which are inconsistently buffered (some entries may
-    be newline delimited, and others are not).
-    """
-    return split_buffer(stream, json_splitter, json_decoder.decode)
-
-
-def json_hash(obj):
-    dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
-    h = hashlib.sha256()
-    h.update(dump.encode('utf8'))
-    return h.hexdigest()
-
-
-def microseconds_from_time_nano(time_nano):
-    return int(time_nano % 1000000000 / 1000)
-
-
-def build_string_dict(source_dict):
-    return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
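
A one-line illustration of why json_hash() above is suitable for the config-hash
label: sort_keys=True makes the digest independent of key insertion order:

    # Key order does not affect the digest, so identical configs always
    # produce identical LABEL_CONFIG_HASH values.
    assert json_hash({'a': 1, 'b': 2}) == json_hash({'b': 2, 'a': 1})
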
diff --git a/env2/lib/python2.7/site-packages/compose/volume.py b/env2/lib/python2.7/site-packages/compose/volume.py
deleted file mode 100644
index f440ba4..0000000
--- a/env2/lib/python2.7/site-packages/compose/volume.py
+++ /dev/null
@@ -1,135 +0,0 @@
-from __future__ import absolute_import
-from __future__ import unicode_literals
-
-import logging
-
-from docker.errors import NotFound
-
-from .config import ConfigurationError
-
-log = logging.getLogger(__name__)
-
-
-class Volume(object):
-    def __init__(self, client, project, name, driver=None, driver_opts=None,
-                 external_name=None):
-        self.client = client
-        self.project = project
-        self.name = name
-        self.driver = driver
-        self.driver_opts = driver_opts
-        self.external_name = external_name
-
-    def create(self):
-        return self.client.create_volume(
-            self.full_name, self.driver, self.driver_opts
-        )
-
-    def remove(self):
-        if self.external:
-            log.info("Volume %s is external, skipping", self.full_name)
-            return
-        log.info("Removing volume %s", self.full_name)
-        return self.client.remove_volume(self.full_name)
-
-    def inspect(self):
-        return self.client.inspect_volume(self.full_name)
-
-    def exists(self):
-        try:
-            self.inspect()
-        except NotFound:
-            return False
-        return True
-
-    @property
-    def external(self):
-        return bool(self.external_name)
-
-    @property
-    def full_name(self):
-        if self.external_name:
-            return self.external_name
-        return '{0}_{1}'.format(self.project, self.name)
-
-
-class ProjectVolumes(object):
-
-    def __init__(self, volumes):
-        self.volumes = volumes
-
-    @classmethod
-    def from_config(cls, name, config_data, client):
-        config_volumes = config_data.volumes or {}
-        volumes = {
-            vol_name: Volume(
-                client=client,
-                project=name,
-                name=vol_name,
-                driver=data.get('driver'),
-                driver_opts=data.get('driver_opts'),
-                external_name=data.get('external_name')
-            )
-            for vol_name, data in config_volumes.items()
-        }
-        return cls(volumes)
-
-    def remove(self):
-        for volume in self.volumes.values():
-            try:
-                volume.remove()
-            except NotFound:
-                log.warn("Volume %s not found.", volume.full_name)
-
-    def initialize(self):
-        try:
-            for volume in self.volumes.values():
-                volume_exists = volume.exists()
-                if volume.external:
-                    log.debug(
-                        'Volume {0} declared as external. No new '
-                        'volume will be created.'.format(volume.name)
-                    )
-                    if not volume_exists:
-                        raise ConfigurationError(
-                            'Volume {name} declared as external, but could'
-                            ' not be found. Please create the volume manually'
-                            ' using `{command}{name}` and try again.'.format(
-                                name=volume.full_name,
-                                command='docker volume create --name='
-                            )
-                        )
-                    continue
-
-                if not volume_exists:
-                    log.info(
-                        'Creating volume "{0}" with {1} driver'.format(
-                            volume.full_name, volume.driver or 'default'
-                        )
-                    )
-                    volume.create()
-                else:
-                    driver = volume.inspect()['Driver']
-                    if volume.driver is not None and driver != volume.driver:
-                        raise ConfigurationError(
-                            'Configuration for volume {0} specifies driver '
-                            '{1}, but a volume with the same name uses a '
-                            'different driver ({3}). If you wish to use the '
-                            'new configuration, please remove the existing '
-                            'volume "{2}" first:\n'
-                            '$ docker volume rm {2}'.format(
-                                volume.name, volume.driver, volume.full_name,
-                                volume.inspect()['Driver']
-                            )
-                        )
-        except NotFound:
-            raise ConfigurationError(
-                'Volume %s specifies nonexistent driver %s' % (volume.name, volume.driver)
-            )
-
-    def namespace_spec(self, volume_spec):
-        if not volume_spec.is_named_volume:
-            return volume_spec
-
-        volume = self.volumes[volume_spec.external]
-        return volume_spec._replace(external=volume.full_name)
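
A short sketch of the naming rule implemented by Volume.full_name above. The
client=None is a placeholder, as no API calls are made here:

    # Project-local volumes are namespaced with the project name; external
    # volumes keep their declared name verbatim.
    local = Volume(client=None, project='myapp', name='data')
    shared = Volume(client=None, project='myapp', name='data',
                    external_name='shared-data')
    local.full_name   # -> 'myapp_data'
    shared.full_name  # -> 'shared-data'
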
diff --git a/env2/lib/python2.7/site-packages/docker/__init__.py b/env2/lib/python2.7/site-packages/docker/__init__.py
deleted file mode 100644
index ad53805..0000000
--- a/env2/lib/python2.7/site-packages/docker/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .version import version, version_info
-
-__version__ = version
-__title__ = 'docker-py'
-
-from .client import Client, AutoVersionClient, from_env # flake8: noqa
diff --git a/env2/lib/python2.7/site-packages/docker/api/__init__.py b/env2/lib/python2.7/site-packages/docker/api/__init__.py
deleted file mode 100644
index bc7e93c..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# flake8: noqa
-from .build import BuildApiMixin
-from .container import ContainerApiMixin
-from .daemon import DaemonApiMixin
-from .exec_api import ExecApiMixin
-from .image import ImageApiMixin
-from .network import NetworkApiMixin
-from .service import ServiceApiMixin
-from .swarm import SwarmApiMixin
-from .volume import VolumeApiMixin
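
These are mixin classes; the concrete Client exported from docker/__init__.py is
assembled by inheriting from all of them on top of an HTTP base class, roughly
as sketched below (ClientBase is a stand-in name, not the real class):

    # Rough shape of the composition; each mixin contributes one API area
    # (build, containers, networks, ...) over the shared HTTP plumbing.
    class Client(ClientBase, BuildApiMixin, ContainerApiMixin, DaemonApiMixin,
                 ExecApiMixin, ImageApiMixin, NetworkApiMixin, ServiceApiMixin,
                 SwarmApiMixin, VolumeApiMixin):
        pass
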
diff --git a/env2/lib/python2.7/site-packages/docker/api/build.py b/env2/lib/python2.7/site-packages/docker/api/build.py
deleted file mode 100644
index 7403716..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/build.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import logging
-import os
-import re
-import json
-
-from .. import constants
-from .. import errors
-from .. import auth
-from .. import utils
-
-
-log = logging.getLogger(__name__)
-
-
-class BuildApiMixin(object):
-    def build(self, path=None, tag=None, quiet=False, fileobj=None,
-              nocache=False, rm=False, stream=False, timeout=None,
-              custom_context=False, encoding=None, pull=False,
-              forcerm=False, dockerfile=None, container_limits=None,
-              decode=False, buildargs=None, gzip=False):
-        remote = context = None
-        headers = {}
-        container_limits = container_limits or {}
-        if path is None and fileobj is None:
-            raise TypeError("Either path or fileobj needs to be provided.")
-        if gzip and encoding is not None:
-            raise errors.DockerException(
-                'Cannot use custom encoding if gzip is enabled'
-            )
-
-        for key in container_limits.keys():
-            if key not in constants.CONTAINER_LIMITS_KEYS:
-                raise errors.DockerException(
-                    'Invalid container_limits key {0}'.format(key)
-                )
-
-        if custom_context:
-            if not fileobj:
-                raise TypeError("You must specify fileobj with custom_context")
-            context = fileobj
-        elif fileobj is not None:
-            context = utils.mkbuildcontext(fileobj)
-        elif path.startswith(('http://', 'https://',
-                              'git://', 'github.com/', 'git@')):
-            remote = path
-        elif not os.path.isdir(path):
-            raise TypeError("You must specify a directory to build in path")
-        else:
-            dockerignore = os.path.join(path, '.dockerignore')
-            exclude = None
-            if os.path.exists(dockerignore):
-                with open(dockerignore, 'r') as f:
-                    exclude = list(filter(bool, f.read().splitlines()))
-            context = utils.tar(
-                path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
-            )
-            encoding = 'gzip' if gzip else encoding
-
-        if utils.compare_version('1.8', self._version) >= 0:
-            stream = True
-
-        if dockerfile and utils.compare_version('1.17', self._version) < 0:
-            raise errors.InvalidVersion(
-                'dockerfile was only introduced in API version 1.17'
-            )
-
-        if utils.compare_version('1.19', self._version) < 0:
-            pull = 1 if pull else 0
-
-        u = self._url('/build')
-        params = {
-            't': tag,
-            'remote': remote,
-            'q': quiet,
-            'nocache': nocache,
-            'rm': rm,
-            'forcerm': forcerm,
-            'pull': pull,
-            'dockerfile': dockerfile,
-        }
-        params.update(container_limits)
-
-        if buildargs:
-            if utils.version_gte(self._version, '1.21'):
-                params.update({'buildargs': json.dumps(buildargs)})
-            else:
-                raise errors.InvalidVersion(
-                    'buildargs was only introduced in API version 1.21'
-                )
-
-        if context is not None:
-            headers = {'Content-Type': 'application/tar'}
-            if encoding:
-                headers['Content-Encoding'] = encoding
-
-        if utils.compare_version('1.9', self._version) >= 0:
-            self._set_auth_headers(headers)
-
-        response = self._post(
-            u,
-            data=context,
-            params=params,
-            headers=headers,
-            stream=stream,
-            timeout=timeout,
-        )
-
-        if context is not None and not custom_context:
-            context.close()
-
-        if stream:
-            return self._stream_helper(response, decode=decode)
-        else:
-            output = self._result(response)
-            srch = r'Successfully built ([0-9a-f]+)'
-            match = re.search(srch, output)
-            if not match:
-                return None, output
-            return match.group(1), output
-
-    def _set_auth_headers(self, headers):
-        log.debug('Looking for auth config')
-
-        # If we don't have any auth data so far, try reloading the config
-        # file one more time in case anything showed up in there.
-        if not self._auth_configs:
-            log.debug("No auth config in memory - loading from filesystem")
-            self._auth_configs = auth.load_config()
-
-        # Send the full auth configuration (if any exists), since the build
-        # could use any (or all) of the registries.
-        if self._auth_configs:
-            log.debug(
-                'Sending auth config ({0})'.format(
-                    ', '.join(repr(k) for k in self._auth_configs.keys())
-                )
-            )
-
-            if utils.compare_version('1.19', self._version) >= 0:
-                headers['X-Registry-Config'] = auth.encode_header(
-                    self._auth_configs
-                )
-            else:
-                headers['X-Registry-Config'] = auth.encode_header({
-                    'configs': self._auth_configs
-                })
-        else:
-            log.debug('No auth config found')
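
For context, the BuildApiMixin deleted above drives the daemon's /build endpoint: it tars a build context (or accepts a remote URL / custom fileobj), forces streaming on API >= 1.8, and attaches registry auth headers. A minimal usage sketch, assuming docker-py ~1.10 and a daemon on the default unix socket; the tag and Dockerfile are illustrative:

    import io

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    # An in-memory Dockerfile; build() wraps it in a tar context via
    # utils.mkbuildcontext() before POSTing to /build.
    dockerfile = io.BytesIO(b'FROM alpine:3.4\nCMD ["echo", "hello"]\n')

    # stream is forced on for API >= 1.8, so iterate the decoded chunks.
    for chunk in cli.build(fileobj=dockerfile, tag='demo/hello:latest',
                           rm=True, decode=True):
        if 'stream' in chunk:
            print(chunk['stream'])
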
diff --git a/env2/lib/python2.7/site-packages/docker/api/container.py b/env2/lib/python2.7/site-packages/docker/api/container.py
deleted file mode 100644
index b8507d8..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/container.py
+++ /dev/null
@@ -1,460 +0,0 @@
-import six
-import warnings
-from datetime import datetime
-
-from .. import errors
-from .. import utils
-from ..utils.utils import create_networking_config, create_endpoint_config
-
-
-class ContainerApiMixin(object):
-    @utils.check_resource
-    def attach(self, container, stdout=True, stderr=True,
-               stream=False, logs=False):
-        params = {
-            'logs': logs and 1 or 0,
-            'stdout': stdout and 1 or 0,
-            'stderr': stderr and 1 or 0,
-            'stream': stream and 1 or 0
-        }
-
-        headers = {
-            'Connection': 'Upgrade',
-            'Upgrade': 'tcp'
-        }
-
-        u = self._url("/containers/{0}/attach", container)
-        response = self._post(u, headers=headers, params=params, stream=stream)
-
-        return self._read_from_socket(response, stream)
-
-    @utils.check_resource
-    def attach_socket(self, container, params=None, ws=False):
-        if params is None:
-            params = {
-                'stdout': 1,
-                'stderr': 1,
-                'stream': 1
-            }
-
-        if ws:
-            return self._attach_websocket(container, params)
-
-        headers = {
-            'Connection': 'Upgrade',
-            'Upgrade': 'tcp'
-        }
-
-        u = self._url("/containers/{0}/attach", container)
-        return self._get_raw_response_socket(
-            self.post(
-                u, None, params=self._attach_params(params), stream=True,
-                headers=headers
-            )
-        )
-
-    @utils.check_resource
-    def commit(self, container, repository=None, tag=None, message=None,
-               author=None, changes=None, conf=None):
-        params = {
-            'container': container,
-            'repo': repository,
-            'tag': tag,
-            'comment': message,
-            'author': author,
-            'changes': changes
-        }
-        u = self._url("/commit")
-        return self._result(self._post_json(u, data=conf, params=params),
-                            json=True)
-
-    def containers(self, quiet=False, all=False, trunc=False, latest=False,
-                   since=None, before=None, limit=-1, size=False,
-                   filters=None):
-        params = {
-            'limit': 1 if latest else limit,
-            'all': 1 if all else 0,
-            'size': 1 if size else 0,
-            'trunc_cmd': 1 if trunc else 0,
-            'since': since,
-            'before': before
-        }
-        if filters:
-            params['filters'] = utils.convert_filters(filters)
-        u = self._url("/containers/json")
-        res = self._result(self._get(u, params=params), True)
-
-        if quiet:
-            return [{'Id': x['Id']} for x in res]
-        if trunc:
-            for x in res:
-                x['Id'] = x['Id'][:12]
-        return res
-
-    @utils.check_resource
-    def copy(self, container, resource):
-        if utils.version_gte(self._version, '1.20'):
-            warnings.warn(
-                'Client.copy() is deprecated for API version >= 1.20, '
-                'please use get_archive() instead',
-                DeprecationWarning
-            )
-        res = self._post_json(
-            self._url("/containers/{0}/copy".format(container)),
-            data={"Resource": resource},
-            stream=True
-        )
-        self._raise_for_status(res)
-        return res.raw
-
-    def create_container(self, image, command=None, hostname=None, user=None,
-                         detach=False, stdin_open=False, tty=False,
-                         mem_limit=None, ports=None, environment=None,
-                         dns=None, volumes=None, volumes_from=None,
-                         network_disabled=False, name=None, entrypoint=None,
-                         cpu_shares=None, working_dir=None, domainname=None,
-                         memswap_limit=None, cpuset=None, host_config=None,
-                         mac_address=None, labels=None, volume_driver=None,
-                         stop_signal=None, networking_config=None):
-
-        if isinstance(volumes, six.string_types):
-            volumes = [volumes, ]
-
-        if host_config and utils.compare_version('1.15', self._version) < 0:
-            raise errors.InvalidVersion(
-                'host_config is not supported in API < 1.15'
-            )
-
-        config = self.create_container_config(
-            image, command, hostname, user, detach, stdin_open,
-            tty, mem_limit, ports, environment, dns, volumes, volumes_from,
-            network_disabled, entrypoint, cpu_shares, working_dir, domainname,
-            memswap_limit, cpuset, host_config, mac_address, labels,
-            volume_driver, stop_signal, networking_config,
-        )
-        return self.create_container_from_config(config, name)
-
-    def create_container_config(self, *args, **kwargs):
-        return utils.create_container_config(self._version, *args, **kwargs)
-
-    def create_container_from_config(self, config, name=None):
-        u = self._url("/containers/create")
-        params = {
-            'name': name
-        }
-        res = self._post_json(u, data=config, params=params)
-        return self._result(res, True)
-
-    def create_host_config(self, *args, **kwargs):
-        if not kwargs:
-            kwargs = {}
-        if 'version' in kwargs:
-            raise TypeError(
-                "create_host_config() got an unexpected "
-                "keyword argument 'version'"
-            )
-        kwargs['version'] = self._version
-        return utils.create_host_config(*args, **kwargs)
-
-    def create_networking_config(self, *args, **kwargs):
-        return create_networking_config(*args, **kwargs)
-
-    def create_endpoint_config(self, *args, **kwargs):
-        return create_endpoint_config(self._version, *args, **kwargs)
-
-    @utils.check_resource
-    def diff(self, container):
-        return self._result(
-            self._get(self._url("/containers/{0}/changes", container)), True
-        )
-
-    @utils.check_resource
-    def export(self, container):
-        res = self._get(
-            self._url("/containers/{0}/export", container), stream=True
-        )
-        self._raise_for_status(res)
-        return res.raw
-
-    @utils.check_resource
-    @utils.minimum_version('1.20')
-    def get_archive(self, container, path):
-        params = {
-            'path': path
-        }
-        url = self._url('/containers/{0}/archive', container)
-        res = self._get(url, params=params, stream=True)
-        self._raise_for_status(res)
-        encoded_stat = res.headers.get('x-docker-container-path-stat')
-        return (
-            res.raw,
-            utils.decode_json_header(encoded_stat) if encoded_stat else None
-        )
-
-    @utils.check_resource
-    def inspect_container(self, container):
-        return self._result(
-            self._get(self._url("/containers/{0}/json", container)), True
-        )
-
-    @utils.check_resource
-    def kill(self, container, signal=None):
-        url = self._url("/containers/{0}/kill", container)
-        params = {}
-        if signal is not None:
-            if not isinstance(signal, six.string_types):
-                signal = int(signal)
-            params['signal'] = signal
-        res = self._post(url, params=params)
-
-        self._raise_for_status(res)
-
-    @utils.check_resource
-    def logs(self, container, stdout=True, stderr=True, stream=False,
-             timestamps=False, tail='all', since=None, follow=None):
-        if utils.compare_version('1.11', self._version) >= 0:
-            if follow is None:
-                follow = stream
-            params = {'stderr': stderr and 1 or 0,
-                      'stdout': stdout and 1 or 0,
-                      'timestamps': timestamps and 1 or 0,
-                      'follow': follow and 1 or 0,
-                      }
-            if utils.compare_version('1.13', self._version) >= 0:
-                if tail != 'all' and (not isinstance(tail, int) or tail < 0):
-                    tail = 'all'
-                params['tail'] = tail
-
-            if since is not None:
-                if utils.compare_version('1.19', self._version) < 0:
-                    raise errors.InvalidVersion(
-                        'since is not supported in API < 1.19'
-                    )
-                else:
-                    if isinstance(since, datetime):
-                        params['since'] = utils.datetime_to_timestamp(since)
-                    elif (isinstance(since, int) and since > 0):
-                        params['since'] = since
-            url = self._url("/containers/{0}/logs", container)
-            res = self._get(url, params=params, stream=stream)
-            return self._get_result(container, stream, res)
-        return self.attach(
-            container,
-            stdout=stdout,
-            stderr=stderr,
-            stream=stream,
-            logs=True
-        )
-
-    @utils.check_resource
-    def pause(self, container):
-        url = self._url('/containers/{0}/pause', container)
-        res = self._post(url)
-        self._raise_for_status(res)
-
-    @utils.check_resource
-    def port(self, container, private_port):
-        res = self._get(self._url("/containers/{0}/json", container))
-        self._raise_for_status(res)
-        json_ = res.json()
-        private_port = str(private_port)
-        h_ports = None
-
-        # Port settings is None when the container is running with
-        # network_mode=host.
-        port_settings = json_.get('NetworkSettings', {}).get('Ports')
-        if port_settings is None:
-            return None
-
-        if '/' in private_port:
-            return port_settings.get(private_port)
-
-        h_ports = port_settings.get(private_port + '/tcp')
-        if h_ports is None:
-            h_ports = port_settings.get(private_port + '/udp')
-
-        return h_ports
-
-    @utils.check_resource
-    @utils.minimum_version('1.20')
-    def put_archive(self, container, path, data):
-        params = {'path': path}
-        url = self._url('/containers/{0}/archive', container)
-        res = self._put(url, params=params, data=data)
-        self._raise_for_status(res)
-        return res.status_code == 200
-
-    @utils.check_resource
-    def remove_container(self, container, v=False, link=False, force=False):
-        params = {'v': v, 'link': link, 'force': force}
-        res = self._delete(
-            self._url("/containers/{0}", container), params=params
-        )
-        self._raise_for_status(res)
-
-    @utils.minimum_version('1.17')
-    @utils.check_resource
-    def rename(self, container, name):
-        url = self._url("/containers/{0}/rename", container)
-        params = {'name': name}
-        res = self._post(url, params=params)
-        self._raise_for_status(res)
-
-    @utils.check_resource
-    def resize(self, container, height, width):
-        params = {'h': height, 'w': width}
-        url = self._url("/containers/{0}/resize", container)
-        res = self._post(url, params=params)
-        self._raise_for_status(res)
-
-    @utils.check_resource
-    def restart(self, container, timeout=10):
-        params = {'t': timeout}
-        url = self._url("/containers/{0}/restart", container)
-        res = self._post(url, params=params)
-        self._raise_for_status(res)
-
-    @utils.check_resource
-    def start(self, container, binds=None, port_bindings=None, lxc_conf=None,
-              publish_all_ports=None, links=None, privileged=None,
-              dns=None, dns_search=None, volumes_from=None, network_mode=None,
-              restart_policy=None, cap_add=None, cap_drop=None, devices=None,
-              extra_hosts=None, read_only=None, pid_mode=None, ipc_mode=None,
-              security_opt=None, ulimits=None):
-
-        if utils.compare_version('1.10', self._version) < 0:
-            if dns is not None:
-                raise errors.InvalidVersion(
-                    'dns is only supported for API version >= 1.10'
-                )
-            if volumes_from is not None:
-                raise errors.InvalidVersion(
-                    'volumes_from is only supported for API version >= 1.10'
-                )
-
-        if utils.compare_version('1.15', self._version) < 0:
-            if security_opt is not None:
-                raise errors.InvalidVersion(
-                    'security_opt is only supported for API version >= 1.15'
-                )
-            if ipc_mode:
-                raise errors.InvalidVersion(
-                    'ipc_mode is only supported for API version >= 1.15'
-                )
-
-        if utils.compare_version('1.17', self._version) < 0:
-            if read_only is not None:
-                raise errors.InvalidVersion(
-                    'read_only is only supported for API version >= 1.17'
-                )
-            if pid_mode is not None:
-                raise errors.InvalidVersion(
-                    'pid_mode is only supported for API version >= 1.17'
-                )
-
-        if utils.compare_version('1.18', self._version) < 0:
-            if ulimits is not None:
-                raise errors.InvalidVersion(
-                    'ulimits is only supported for API version >= 1.18'
-                )
-
-        start_config_kwargs = dict(
-            binds=binds, port_bindings=port_bindings, lxc_conf=lxc_conf,
-            publish_all_ports=publish_all_ports, links=links, dns=dns,
-            privileged=privileged, dns_search=dns_search, cap_add=cap_add,
-            cap_drop=cap_drop, volumes_from=volumes_from, devices=devices,
-            network_mode=network_mode, restart_policy=restart_policy,
-            extra_hosts=extra_hosts, read_only=read_only, pid_mode=pid_mode,
-            ipc_mode=ipc_mode, security_opt=security_opt, ulimits=ulimits
-        )
-        start_config = None
-
-        if any(v is not None for v in start_config_kwargs.values()):
-            if utils.compare_version('1.15', self._version) > 0:
-                warnings.warn(
-                    'Passing host config parameters in start() is deprecated. '
-                    'Please use host_config in create_container instead!',
-                    DeprecationWarning
-                )
-            start_config = self.create_host_config(**start_config_kwargs)
-
-        url = self._url("/containers/{0}/start", container)
-        res = self._post_json(url, data=start_config)
-        self._raise_for_status(res)
-
-    @utils.minimum_version('1.17')
-    @utils.check_resource
-    def stats(self, container, decode=None, stream=True):
-        url = self._url("/containers/{0}/stats", container)
-        if stream:
-            return self._stream_helper(self._get(url, stream=True),
-                                       decode=decode)
-        else:
-            return self._result(self._get(url, params={'stream': False}),
-                                json=True)
-
-    @utils.check_resource
-    def stop(self, container, timeout=10):
-        params = {'t': timeout}
-        url = self._url("/containers/{0}/stop", container)
-
-        res = self._post(url, params=params,
-                         timeout=(timeout + (self.timeout or 0)))
-        self._raise_for_status(res)
-
-    @utils.check_resource
-    def top(self, container, ps_args=None):
-        u = self._url("/containers/{0}/top", container)
-        params = {}
-        if ps_args is not None:
-            params['ps_args'] = ps_args
-        return self._result(self._get(u, params=params), True)
-
-    @utils.check_resource
-    def unpause(self, container):
-        url = self._url('/containers/{0}/unpause', container)
-        res = self._post(url)
-        self._raise_for_status(res)
-
-    @utils.minimum_version('1.22')
-    @utils.check_resource
-    def update_container(
-        self, container, blkio_weight=None, cpu_period=None, cpu_quota=None,
-        cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None,
-        mem_reservation=None, memswap_limit=None, kernel_memory=None
-    ):
-        url = self._url('/containers/{0}/update', container)
-        data = {}
-        if blkio_weight:
-            data['BlkioWeight'] = blkio_weight
-        if cpu_period:
-            data['CpuPeriod'] = cpu_period
-        if cpu_shares:
-            data['CpuShares'] = cpu_shares
-        if cpu_quota:
-            data['CpuQuota'] = cpu_quota
-        if cpuset_cpus:
-            data['CpusetCpus'] = cpuset_cpus
-        if cpuset_mems:
-            data['CpusetMems'] = cpuset_mems
-        if mem_limit:
-            data['Memory'] = utils.parse_bytes(mem_limit)
-        if mem_reservation:
-            data['MemoryReservation'] = utils.parse_bytes(mem_reservation)
-        if memswap_limit:
-            data['MemorySwap'] = utils.parse_bytes(memswap_limit)
-        if kernel_memory:
-            data['KernelMemory'] = utils.parse_bytes(kernel_memory)
-
-        res = self._post_json(url, data=data)
-        return self._result(res, True)
-
-    @utils.check_resource
-    def wait(self, container, timeout=None):
-        url = self._url("/containers/{0}/wait", container)
-        res = self._post(url, timeout=timeout)
-        self._raise_for_status(res)
-        json_ = res.json()
-        if 'StatusCode' in json_:
-            return json_['StatusCode']
-        return -1
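
The ContainerApiMixin deleted above covers the whole container lifecycle (create/start/logs/wait/remove and friends). A short sketch of the typical flow, under the same docker-py ~1.10 assumption; image and command are illustrative:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    # create_container() only creates the container; start() runs it.
    container = cli.create_container(image='alpine:3.4',
                                     command='echo hello world')
    cli.start(container=container['Id'])

    # wait() blocks until the container exits, returning its StatusCode.
    print(cli.wait(container['Id']))
    print(cli.logs(container['Id']))

    cli.remove_container(container['Id'])
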
diff --git a/env2/lib/python2.7/site-packages/docker/api/daemon.py b/env2/lib/python2.7/site-packages/docker/api/daemon.py
deleted file mode 100644
index 9ebe73c..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/daemon.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os
-import warnings
-from datetime import datetime
-
-from ..auth import auth
-from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
-from ..utils import utils
-
-
-class DaemonApiMixin(object):
-    def events(self, since=None, until=None, filters=None, decode=None):
-        if isinstance(since, datetime):
-            since = utils.datetime_to_timestamp(since)
-
-        if isinstance(until, datetime):
-            until = utils.datetime_to_timestamp(until)
-
-        if filters:
-            filters = utils.convert_filters(filters)
-
-        params = {
-            'since': since,
-            'until': until,
-            'filters': filters
-        }
-
-        return self._stream_helper(
-            self.get(self._url('/events'), params=params, stream=True),
-            decode=decode
-        )
-
-    def info(self):
-        return self._result(self._get(self._url("/info")), True)
-
-    def login(self, username, password=None, email=None, registry=None,
-              reauth=False, insecure_registry=False, dockercfg_path=None):
-        if insecure_registry:
-            warnings.warn(
-                INSECURE_REGISTRY_DEPRECATION_WARNING.format('login()'),
-                DeprecationWarning
-            )
-
-        # If we don't have any auth data so far, try reloading the config file
-        # one more time in case anything showed up in there.
-        # If dockercfg_path is passed check to see if the config file exists,
-        # if so load that config.
-        if dockercfg_path and os.path.exists(dockercfg_path):
-            self._auth_configs = auth.load_config(dockercfg_path)
-        elif not self._auth_configs:
-            self._auth_configs = auth.load_config()
-
-        authcfg = auth.resolve_authconfig(self._auth_configs, registry)
-        # If we found an existing auth config for this registry and username
-        # combination, we can return it immediately unless reauth is requested.
-        if authcfg and authcfg.get('username', None) == username \
-                and not reauth:
-            return authcfg
-
-        req_data = {
-            'username': username,
-            'password': password,
-            'email': email,
-            'serveraddress': registry,
-        }
-
-        response = self._post_json(self._url('/auth'), data=req_data)
-        if response.status_code == 200:
-            self._auth_configs[registry or auth.INDEX_NAME] = req_data
-        return self._result(response, json=True)
-
-    def ping(self):
-        return self._result(self._get(self._url('/_ping')))
-
-    def version(self, api_version=True):
-        url = self._url("/version", versioned_api=api_version)
-        return self._result(self._get(url), json=True)
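
DaemonApiMixin, deleted above, exposes daemon-level calls: ping/version/info, the event stream, and registry login. A brief sketch under the same assumptions:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    print(cli.ping())                    # 'OK' when the daemon is reachable
    print(cli.version()['ApiVersion'])   # negotiated API version
    print(cli.info()['Containers'])      # daemon-wide container count

    # events() returns a generator over the daemon's event stream.
    for event in cli.events(decode=True):
        print(event)
        break  # stop after one event; the stream is otherwise endless
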
diff --git a/env2/lib/python2.7/site-packages/docker/api/exec_api.py b/env2/lib/python2.7/site-packages/docker/api/exec_api.py
deleted file mode 100644
index 6e49996..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/exec_api.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import six
-
-from .. import errors
-from .. import utils
-
-
-class ExecApiMixin(object):
-    @utils.minimum_version('1.15')
-    @utils.check_resource
-    def exec_create(self, container, cmd, stdout=True, stderr=True,
-                    stdin=False, tty=False, privileged=False, user=''):
-        if privileged and utils.compare_version('1.19', self._version) < 0:
-            raise errors.InvalidVersion(
-                'Privileged exec is not supported in API < 1.19'
-            )
-        if user and utils.compare_version('1.19', self._version) < 0:
-            raise errors.InvalidVersion(
-                'User-specific exec is not supported in API < 1.19'
-            )
-        if isinstance(cmd, six.string_types):
-            cmd = utils.split_command(cmd)
-
-        data = {
-            'Container': container,
-            'User': user,
-            'Privileged': privileged,
-            'Tty': tty,
-            'AttachStdin': stdin,
-            'AttachStdout': stdout,
-            'AttachStderr': stderr,
-            'Cmd': cmd
-        }
-
-        url = self._url('/containers/{0}/exec', container)
-        res = self._post_json(url, data=data)
-        return self._result(res, True)
-
-    @utils.minimum_version('1.16')
-    def exec_inspect(self, exec_id):
-        if isinstance(exec_id, dict):
-            exec_id = exec_id.get('Id')
-        res = self._get(self._url("/exec/{0}/json", exec_id))
-        return self._result(res, True)
-
-    @utils.minimum_version('1.15')
-    def exec_resize(self, exec_id, height=None, width=None):
-        if isinstance(exec_id, dict):
-            exec_id = exec_id.get('Id')
-
-        params = {'h': height, 'w': width}
-        url = self._url("/exec/{0}/resize", exec_id)
-        res = self._post(url, params=params)
-        self._raise_for_status(res)
-
-    @utils.minimum_version('1.15')
-    def exec_start(self, exec_id, detach=False, tty=False, stream=False,
-                   socket=False):
-        # we want opened socket if socket == True
-        if isinstance(exec_id, dict):
-            exec_id = exec_id.get('Id')
-
-        data = {
-            'Tty': tty,
-            'Detach': detach
-        }
-
-        headers = {} if detach else {
-            'Connection': 'Upgrade',
-            'Upgrade': 'tcp'
-        }
-
-        res = self._post_json(
-            self._url('/exec/{0}/start', exec_id),
-            headers=headers,
-            data=data,
-            stream=True
-        )
-
-        if socket:
-            return self._get_raw_response_socket(res)
-        return self._read_from_socket(res, stream)
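
ExecApiMixin, deleted above, maps to the exec_create/exec_start/exec_inspect endpoints (API >= 1.15). A sketch of running a command inside a live container, same assumptions:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    # exec targets a *running* container; sleep keeps one alive briefly.
    c = cli.create_container(image='alpine:3.4', command='sleep 60')
    cli.start(container=c['Id'])

    # exec_create() registers the command, exec_start() runs it; both
    # accept the {'Id': ...} dict that exec_create() returns.
    ex = cli.exec_create(container=c['Id'], cmd='ls -l /')
    print(cli.exec_start(ex))
    print(cli.exec_inspect(ex)['ExitCode'])

    cli.remove_container(c['Id'], force=True)
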
diff --git a/env2/lib/python2.7/site-packages/docker/api/image.py b/env2/lib/python2.7/site-packages/docker/api/image.py
deleted file mode 100644
index 7f25f9d..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/image.py
+++ /dev/null
@@ -1,267 +0,0 @@
-import logging
-import os
-import six
-import warnings
-
-from ..auth import auth
-from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
-from .. import utils
-from .. import errors
-
-log = logging.getLogger(__name__)
-
-
-class ImageApiMixin(object):
-
-    @utils.check_resource
-    def get_image(self, image):
-        res = self._get(self._url("/images/{0}/get", image), stream=True)
-        self._raise_for_status(res)
-        return res.raw
-
-    @utils.check_resource
-    def history(self, image):
-        res = self._get(self._url("/images/{0}/history", image))
-        return self._result(res, True)
-
-    def images(self, name=None, quiet=False, all=False, viz=False,
-               filters=None):
-        if viz:
-            if utils.compare_version('1.7', self._version) >= 0:
-                raise Exception('Viz output is not supported in API >= 1.7!')
-            return self._result(self._get(self._url("images/viz")))
-        params = {
-            'filter': name,
-            'only_ids': 1 if quiet else 0,
-            'all': 1 if all else 0,
-        }
-        if filters:
-            params['filters'] = utils.convert_filters(filters)
-        res = self._result(self._get(self._url("/images/json"), params=params),
-                           True)
-        if quiet:
-            return [x['Id'] for x in res]
-        return res
-
-    def import_image(self, src=None, repository=None, tag=None, image=None,
-                     changes=None, stream_src=False):
-        if not (src or image):
-            raise errors.DockerException(
-                'Must specify src or image to import from'
-            )
-        u = self._url('/images/create')
-
-        params = _import_image_params(
-            repository, tag, image,
-            src=(src if isinstance(src, six.string_types) else None),
-            changes=changes
-        )
-        headers = {'Content-Type': 'application/tar'}
-
-        if image or params.get('fromSrc') != '-':  # from image or URL
-            return self._result(
-                self._post(u, data=None, params=params)
-            )
-        elif isinstance(src, six.string_types):  # from file path
-            with open(src, 'rb') as f:
-                return self._result(
-                    self._post(
-                        u, data=f, params=params, headers=headers, timeout=None
-                    )
-                )
-        else:  # from raw data
-            if stream_src:
-                headers['Transfer-Encoding'] = 'chunked'
-            return self._result(
-                self._post(u, data=src, params=params, headers=headers)
-            )
-
-    def import_image_from_data(self, data, repository=None, tag=None,
-                               changes=None):
-        u = self._url('/images/create')
-        params = _import_image_params(
-            repository, tag, src='-', changes=changes
-        )
-        headers = {'Content-Type': 'application/tar'}
-        return self._result(
-            self._post(
-                u, data=data, params=params, headers=headers, timeout=None
-            )
-        )
-
-    def import_image_from_file(self, filename, repository=None, tag=None,
-                               changes=None):
-        return self.import_image(
-            src=filename, repository=repository, tag=tag, changes=changes
-        )
-
-    def import_image_from_stream(self, stream, repository=None, tag=None,
-                                 changes=None):
-        return self.import_image(
-            src=stream, stream_src=True, repository=repository, tag=tag,
-            changes=changes
-        )
-
-    def import_image_from_url(self, url, repository=None, tag=None,
-                              changes=None):
-        return self.import_image(
-            src=url, repository=repository, tag=tag, changes=changes
-        )
-
-    def import_image_from_image(self, image, repository=None, tag=None,
-                                changes=None):
-        return self.import_image(
-            image=image, repository=repository, tag=tag, changes=changes
-        )
-
-    @utils.check_resource
-    def insert(self, image, url, path):
-        if utils.compare_version('1.12', self._version) >= 0:
-            raise errors.DeprecatedMethod(
-                'insert is not available for API version >=1.12'
-            )
-        api_url = self._url("/images/{0}/insert", image)
-        params = {
-            'url': url,
-            'path': path
-        }
-        return self._result(self._post(api_url, params=params))
-
-    @utils.check_resource
-    def inspect_image(self, image):
-        return self._result(
-            self._get(self._url("/images/{0}/json", image)), True
-        )
-
-    def load_image(self, data):
-        res = self._post(self._url("/images/load"), data=data)
-        self._raise_for_status(res)
-
-    def pull(self, repository, tag=None, stream=False,
-             insecure_registry=False, auth_config=None, decode=False):
-        if insecure_registry:
-            warnings.warn(
-                INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
-                DeprecationWarning
-            )
-
-        if not tag:
-            repository, tag = utils.parse_repository_tag(repository)
-        registry, repo_name = auth.resolve_repository_name(repository)
-
-        params = {
-            'tag': tag,
-            'fromImage': repository
-        }
-        headers = {}
-
-        if utils.compare_version('1.5', self._version) >= 0:
-            if auth_config is None:
-                header = auth.get_config_header(self, registry)
-                if header:
-                    headers['X-Registry-Auth'] = header
-            else:
-                log.debug('Sending supplied auth config')
-                headers['X-Registry-Auth'] = auth.encode_header(auth_config)
-
-        response = self._post(
-            self._url('/images/create'), params=params, headers=headers,
-            stream=stream, timeout=None
-        )
-
-        self._raise_for_status(response)
-
-        if stream:
-            return self._stream_helper(response, decode=decode)
-
-        return self._result(response)
-
-    def push(self, repository, tag=None, stream=False,
-             insecure_registry=False, auth_config=None, decode=False):
-        if insecure_registry:
-            warnings.warn(
-                INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
-                DeprecationWarning
-            )
-
-        if not tag:
-            repository, tag = utils.parse_repository_tag(repository)
-        registry, repo_name = auth.resolve_repository_name(repository)
-        u = self._url("/images/{0}/push", repository)
-        params = {
-            'tag': tag
-        }
-        headers = {}
-
-        if utils.compare_version('1.5', self._version) >= 0:
-            if auth_config is None:
-                header = auth.get_config_header(self, registry)
-                if header:
-                    headers['X-Registry-Auth'] = header
-            else:
-                log.debug('Sending supplied auth config')
-                headers['X-Registry-Auth'] = auth.encode_header(auth_config)
-
-        response = self._post_json(
-            u, None, headers=headers, stream=stream, params=params
-        )
-
-        self._raise_for_status(response)
-
-        if stream:
-            return self._stream_helper(response, decode=decode)
-
-        return self._result(response)
-
-    @utils.check_resource
-    def remove_image(self, image, force=False, noprune=False):
-        params = {'force': force, 'noprune': noprune}
-        res = self._delete(self._url("/images/{0}", image), params=params)
-        self._raise_for_status(res)
-
-    def search(self, term):
-        return self._result(
-            self._get(self._url("/images/search"), params={'term': term}),
-            True
-        )
-
-    @utils.check_resource
-    def tag(self, image, repository, tag=None, force=False):
-        params = {
-            'tag': tag,
-            'repo': repository,
-            'force': 1 if force else 0
-        }
-        url = self._url("/images/{0}/tag", image)
-        res = self._post(url, params=params)
-        self._raise_for_status(res)
-        return res.status_code == 201
-
-
-def is_file(src):
-    try:
-        return (
-            isinstance(src, six.string_types) and
-            os.path.isfile(src)
-        )
-    except TypeError:  # a data string will make isfile() raise a TypeError
-        return False
-
-
-def _import_image_params(repo, tag, image=None, src=None,
-                         changes=None):
-    params = {
-        'repo': repo,
-        'tag': tag,
-    }
-    if image:
-        params['fromImage'] = image
-    elif src and not is_file(src):
-        params['fromSrc'] = src
-    else:
-        params['fromSrc'] = '-'
-
-    if changes:
-        params['changes'] = changes
-
-    return params
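
ImageApiMixin, deleted above, wraps pull/push/tag/import and attaches X-Registry-Auth headers where needed. A pull-and-tag sketch, same assumptions; the mirror repository name is made up:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    # pull() yields decoded progress chunks when stream=True.
    for chunk in cli.pull('alpine', tag='3.4', stream=True, decode=True):
        print(chunk.get('status'))

    # tag() returns True on a 201 from the daemon.
    print(cli.tag('alpine:3.4', 'registry.example.com/mirror/alpine',
                  tag='3.4'))

    # images() filtering mirrors `docker images alpine`.
    print([img['RepoTags'] for img in cli.images(name='alpine')])
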
diff --git a/env2/lib/python2.7/site-packages/docker/api/network.py b/env2/lib/python2.7/site-packages/docker/api/network.py
deleted file mode 100644
index 0ee0dab..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/network.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import json
-
-from ..errors import InvalidVersion
-from ..utils import check_resource, minimum_version
-from ..utils import version_lt
-
-
-class NetworkApiMixin(object):
-    @minimum_version('1.21')
-    def networks(self, names=None, ids=None):
-        filters = {}
-        if names:
-            filters['name'] = names
-        if ids:
-            filters['id'] = ids
-
-        params = {'filters': json.dumps(filters)}
-
-        url = self._url("/networks")
-        res = self._get(url, params=params)
-        return self._result(res, json=True)
-
-    @minimum_version('1.21')
-    def create_network(self, name, driver=None, options=None, ipam=None,
-                       check_duplicate=None, internal=False, labels=None,
-                       enable_ipv6=False):
-        if options is not None and not isinstance(options, dict):
-            raise TypeError('options must be a dictionary')
-
-        data = {
-            'Name': name,
-            'Driver': driver,
-            'Options': options,
-            'IPAM': ipam,
-            'CheckDuplicate': check_duplicate
-        }
-
-        if labels is not None:
-            if version_lt(self._version, '1.23'):
-                raise InvalidVersion(
-                    'network labels were introduced in API 1.23'
-                )
-            if not isinstance(labels, dict):
-                raise TypeError('labels must be a dictionary')
-            data["Labels"] = labels
-
-        if enable_ipv6:
-            if version_lt(self._version, '1.23'):
-                raise InvalidVersion(
-                    'enable_ipv6 was introduced in API 1.23'
-                )
-            data['EnableIPv6'] = True
-
-        if internal:
-            if version_lt(self._version, '1.22'):
-                raise InvalidVersion('Internal networks are not '
-                                     'supported in API version < 1.22')
-            data['Internal'] = True
-
-        url = self._url("/networks/create")
-        res = self._post_json(url, data=data)
-        return self._result(res, json=True)
-
-    @minimum_version('1.21')
-    def remove_network(self, net_id):
-        url = self._url("/networks/{0}", net_id)
-        res = self._delete(url)
-        self._raise_for_status(res)
-
-    @minimum_version('1.21')
-    def inspect_network(self, net_id):
-        url = self._url("/networks/{0}", net_id)
-        res = self._get(url)
-        return self._result(res, json=True)
-
-    @check_resource
-    @minimum_version('1.21')
-    def connect_container_to_network(self, container, net_id,
-                                     ipv4_address=None, ipv6_address=None,
-                                     aliases=None, links=None,
-                                     link_local_ips=None):
-        data = {
-            "Container": container,
-            "EndpointConfig": self.create_endpoint_config(
-                aliases=aliases, links=links, ipv4_address=ipv4_address,
-                ipv6_address=ipv6_address, link_local_ips=link_local_ips
-            ),
-        }
-
-        url = self._url("/networks/{0}/connect", net_id)
-        res = self._post_json(url, data=data)
-        self._raise_for_status(res)
-
-    @check_resource
-    @minimum_version('1.21')
-    def disconnect_container_from_network(self, container, net_id,
-                                          force=False):
-        data = {"Container": container}
-        if force:
-            if version_lt(self._version, '1.22'):
-                raise InvalidVersion(
-                    'Forced disconnect was introduced in API 1.22'
-                )
-            data['Force'] = force
-        url = self._url("/networks/{0}/disconnect", net_id)
-        res = self._post_json(url, data=data)
-        self._raise_for_status(res)
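
NetworkApiMixin, deleted above, requires API >= 1.21 throughout, with the 1.22/1.23 gates shown for internal networks, labels, IPv6, and forced disconnects. A sketch of the connect/disconnect round trip, same assumptions:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    net = cli.create_network('demo-net', driver='bridge')

    c = cli.create_container(image='alpine:3.4', command='sleep 60')
    cli.start(container=c['Id'])
    cli.connect_container_to_network(c['Id'], net['Id'])

    # Attached containers show up under 'Containers' in the inspect output.
    print(cli.inspect_network(net['Id'])['Containers'])

    cli.disconnect_container_from_network(c['Id'], net['Id'])
    cli.remove_network(net['Id'])
    cli.remove_container(c['Id'], force=True)
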
diff --git a/env2/lib/python2.7/site-packages/docker/api/service.py b/env2/lib/python2.7/site-packages/docker/api/service.py
deleted file mode 100644
index baebbad..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/service.py
+++ /dev/null
@@ -1,105 +0,0 @@
-from .. import errors
-from .. import utils
-from ..auth import auth
-
-
-class ServiceApiMixin(object):
-    @utils.minimum_version('1.24')
-    def create_service(
-            self, task_template, name=None, labels=None, mode=None,
-            update_config=None, networks=None, endpoint_config=None
-    ):
-        url = self._url('/services/create')
-        headers = {}
-        image = task_template.get('ContainerSpec', {}).get('Image', None)
-        if image is None:
-            raise errors.DockerException(
-                'Missing mandatory Image key in ContainerSpec'
-            )
-        registry, repo_name = auth.resolve_repository_name(image)
-        auth_header = auth.get_config_header(self, registry)
-        if auth_header:
-            headers['X-Registry-Auth'] = auth_header
-        data = {
-            'Name': name,
-            'Labels': labels,
-            'TaskTemplate': task_template,
-            'Mode': mode,
-            'UpdateConfig': update_config,
-            'Networks': networks,
-            'Endpoint': endpoint_config
-        }
-        return self._result(
-            self._post_json(url, data=data, headers=headers), True
-        )
-
-    @utils.minimum_version('1.24')
-    @utils.check_resource
-    def inspect_service(self, service):
-        url = self._url('/services/{0}', service)
-        return self._result(self._get(url), True)
-
-    @utils.minimum_version('1.24')
-    @utils.check_resource
-    def inspect_task(self, task):
-        url = self._url('/tasks/{0}', task)
-        return self._result(self._get(url), True)
-
-    @utils.minimum_version('1.24')
-    @utils.check_resource
-    def remove_service(self, service):
-        url = self._url('/services/{0}', service)
-        resp = self._delete(url)
-        self._raise_for_status(resp)
-        return True
-
-    @utils.minimum_version('1.24')
-    def services(self, filters=None):
-        params = {
-            'filters': utils.convert_filters(filters) if filters else None
-        }
-        url = self._url('/services')
-        return self._result(self._get(url, params=params), True)
-
-    @utils.minimum_version('1.24')
-    def tasks(self, filters=None):
-        params = {
-            'filters': utils.convert_filters(filters) if filters else None
-        }
-        url = self._url('/tasks')
-        return self._result(self._get(url, params=params), True)
-
-    @utils.minimum_version('1.24')
-    @utils.check_resource
-    def update_service(self, service, version, task_template=None, name=None,
-                       labels=None, mode=None, update_config=None,
-                       networks=None, endpoint_config=None):
-        url = self._url('/services/{0}/update', service)
-        data = {}
-        headers = {}
-        if name is not None:
-            data['Name'] = name
-        if labels is not None:
-            data['Labels'] = labels
-        if mode is not None:
-            data['Mode'] = mode
-        if task_template is not None:
-            image = task_template.get('ContainerSpec', {}).get('Image', None)
-            if image is not None:
-                registry, repo_name = auth.resolve_repository_name(image)
-                auth_header = auth.get_config_header(self, registry)
-                if auth_header:
-                    headers['X-Registry-Auth'] = auth_header
-            data['TaskTemplate'] = task_template
-        if update_config is not None:
-            data['UpdateConfig'] = update_config
-        if networks is not None:
-            data['Networks'] = networks
-        if endpoint_config is not None:
-            data['Endpoint'] = endpoint_config
-
-        resp = self._post_json(
-            url, data=data, params={'version': version}, headers=headers
-        )
-        self._raise_for_status(resp)
-        return True
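
ServiceApiMixin, deleted above, targets swarm-mode services (API >= 1.24) and enforces a mandatory Image key inside the ContainerSpec. A sketch, assuming the daemon is already a swarm manager:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    # The Image key is mandatory, as create_service() enforces above.
    task_template = {'ContainerSpec': {'Image': 'nginx:alpine'}}
    svc = cli.create_service(task_template, name='web')

    print(cli.inspect_service(svc['ID'])['Spec']['Name'])
    print(cli.services(filters={'name': 'web'}))

    cli.remove_service(svc['ID'])
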
diff --git a/env2/lib/python2.7/site-packages/docker/api/swarm.py b/env2/lib/python2.7/site-packages/docker/api/swarm.py
deleted file mode 100644
index d099364..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/swarm.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from .. import utils
-import logging
-log = logging.getLogger(__name__)
-
-
-class SwarmApiMixin(object):
-
-    def create_swarm_spec(self, *args, **kwargs):
-        return utils.SwarmSpec(*args, **kwargs)
-
-    @utils.minimum_version('1.24')
-    def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
-                   force_new_cluster=False, swarm_spec=None):
-        url = self._url('/swarm/init')
-        if swarm_spec is not None and not isinstance(swarm_spec, dict):
-            raise TypeError('swarm_spec must be a dictionary')
-        data = {
-            'AdvertiseAddr': advertise_addr,
-            'ListenAddr': listen_addr,
-            'ForceNewCluster': force_new_cluster,
-            'Spec': swarm_spec,
-        }
-        response = self._post_json(url, data=data)
-        self._raise_for_status(response)
-        return True
-
-    @utils.minimum_version('1.24')
-    def inspect_swarm(self):
-        url = self._url('/swarm')
-        return self._result(self._get(url), True)
-
-    @utils.check_resource
-    @utils.minimum_version('1.24')
-    def inspect_node(self, node_id):
-        url = self._url('/nodes/{0}', node_id)
-        return self._result(self._get(url), True)
-
-    @utils.minimum_version('1.24')
-    def join_swarm(self, remote_addrs, join_token, listen_addr=None,
-                   advertise_addr=None):
-        data = {
-            "RemoteAddrs": remote_addrs,
-            "ListenAddr": listen_addr,
-            "JoinToken": join_token,
-            "AdvertiseAddr": advertise_addr,
-        }
-        url = self._url('/swarm/join')
-        response = self._post_json(url, data=data)
-        self._raise_for_status(response)
-        return True
-
-    @utils.minimum_version('1.24')
-    def leave_swarm(self, force=False):
-        url = self._url('/swarm/leave')
-        response = self._post(url, params={'force': force})
-        self._raise_for_status(response)
-        return True
-
-    @utils.minimum_version('1.24')
-    def nodes(self, filters=None):
-        url = self._url('/nodes')
-        params = {}
-        if filters:
-            params['filters'] = utils.convert_filters(filters)
-
-        return self._result(self._get(url, params=params), True)
-
-    @utils.minimum_version('1.24')
-    def update_swarm(self, version, swarm_spec=None, rotate_worker_token=False,
-                     rotate_manager_token=False):
-        url = self._url('/swarm/update')
-        response = self._post_json(url, data=swarm_spec, params={
-            'rotateWorkerToken': rotate_worker_token,
-            'rotateManagerToken': rotate_manager_token,
-            'version': version
-        })
-        self._raise_for_status(response)
-        return True
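
SwarmApiMixin, deleted above, manages swarm membership itself (API >= 1.24). A single-node sketch, same assumptions:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    # init_swarm() turns this daemon into a one-node manager.
    cli.init_swarm(advertise_addr='127.0.0.1', listen_addr='0.0.0.0:2377')

    swarm = cli.inspect_swarm()
    print(swarm['ID'])
    print(swarm['JoinTokens']['Worker'])  # token workers would join with

    cli.leave_swarm(force=True)  # force is required on the last manager
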
diff --git a/env2/lib/python2.7/site-packages/docker/api/volume.py b/env2/lib/python2.7/site-packages/docker/api/volume.py
deleted file mode 100644
index afc72cb..0000000
--- a/env2/lib/python2.7/site-packages/docker/api/volume.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from .. import errors
-from .. import utils
-
-
-class VolumeApiMixin(object):
-    @utils.minimum_version('1.21')
-    def volumes(self, filters=None):
-        params = {
-            'filters': utils.convert_filters(filters) if filters else None
-        }
-        url = self._url('/volumes')
-        return self._result(self._get(url, params=params), True)
-
-    @utils.minimum_version('1.21')
-    def create_volume(self, name, driver=None, driver_opts=None, labels=None):
-        url = self._url('/volumes/create')
-        if driver_opts is not None and not isinstance(driver_opts, dict):
-            raise TypeError('driver_opts must be a dictionary')
-
-        data = {
-            'Name': name,
-            'Driver': driver,
-            'DriverOpts': driver_opts,
-        }
-
-        if labels is not None:
-            if utils.compare_version('1.23', self._version) < 0:
-                raise errors.InvalidVersion(
-                    'volume labels were introduced in API 1.23'
-                )
-            if not isinstance(labels, dict):
-                raise TypeError('labels must be a dictionary')
-            data["Labels"] = labels
-
-        return self._result(self._post_json(url, data=data), True)
-
-    @utils.minimum_version('1.21')
-    def inspect_volume(self, name):
-        url = self._url('/volumes/{0}', name)
-        return self._result(self._get(url), True)
-
-    @utils.minimum_version('1.21')
-    def remove_volume(self, name):
-        url = self._url('/volumes/{0}', name)
-        resp = self._delete(url)
-        self._raise_for_status(resp)
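
VolumeApiMixin, deleted above, is the API >= 1.21 volume CRUD, with labels gated to API 1.23. A sketch, same assumptions; the volume name is illustrative:

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock', version='auto')

    vol = cli.create_volume(name='demo-vol', driver='local')
    print(vol['Mountpoint'])

    print(cli.inspect_volume('demo-vol')['Driver'])
    print(cli.volumes()['Volumes'])

    cli.remove_volume('demo-vol')
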
diff --git a/env2/lib/python2.7/site-packages/docker/auth/__init__.py b/env2/lib/python2.7/site-packages/docker/auth/__init__.py
deleted file mode 100644
index 6fc83f8..0000000
--- a/env2/lib/python2.7/site-packages/docker/auth/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from .auth import (
-    INDEX_NAME,
-    INDEX_URL,
-    encode_header,
-    load_config,
-    resolve_authconfig,
-    resolve_repository_name,
-)  # flake8: noqa
\ No newline at end of file
diff --git a/env2/lib/python2.7/site-packages/docker/auth/auth.py b/env2/lib/python2.7/site-packages/docker/auth/auth.py
deleted file mode 100644
index dc0baea..0000000
--- a/env2/lib/python2.7/site-packages/docker/auth/auth.py
+++ /dev/null
@@ -1,303 +0,0 @@
-import base64
-import json
-import logging
-import os
-
-import dockerpycreds
-import six
-
-from .. import errors
-
-INDEX_NAME = 'docker.io'
-INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
-DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
-LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
-TOKEN_USERNAME = '<token>'
-
-log = logging.getLogger(__name__)
-
-
-def resolve_repository_name(repo_name):
-    if '://' in repo_name:
-        raise errors.InvalidRepository(
-            'Repository name cannot contain a scheme ({0})'.format(repo_name)
-        )
-
-    index_name, remote_name = split_repo_name(repo_name)
-    if index_name[0] == '-' or index_name[-1] == '-':
-        raise errors.InvalidRepository(
-            'Invalid index name ({0}). Cannot begin or end with a'
-            ' hyphen.'.format(index_name)
-        )
-    return resolve_index_name(index_name), remote_name
-
-
-def resolve_index_name(index_name):
-    index_name = convert_to_hostname(index_name)
-    if index_name == 'index.' + INDEX_NAME:
-        index_name = INDEX_NAME
-    return index_name
-
-
-def get_config_header(client, registry):
-    log.debug('Looking for auth config')
-    if not client._auth_configs:
-        log.debug(
-            "No auth config in memory - loading from filesystem"
-        )
-        client._auth_configs = load_config()
-    authcfg = resolve_authconfig(client._auth_configs, registry)
-    # Do not fail here if no authentication exists for this
-    # specific registry as we can have a readonly pull. Just
-    # put the header if we can.
-    if authcfg:
-        log.debug('Found auth config')
-        # auth_config needs to be a dict in the format used by
-        # auth.py username , password, serveraddress, email
-        return encode_header(authcfg)
-    log.debug('No auth config found')
-    return None
-
-
-def split_repo_name(repo_name):
-    parts = repo_name.split('/', 1)
-    if len(parts) == 1 or (
-        '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
-    ):
-        # This is a docker index repo (ex: username/foobar or ubuntu)
-        return INDEX_NAME, repo_name
-    return tuple(parts)
-
-
-def resolve_authconfig(authconfig, registry=None):
-    """
-    Returns the authentication data from the given auth configuration for a
-    specific registry. As with the Docker client, legacy entries in the config
-    with full URLs are stripped down to hostnames before checking for a match.
-    Returns None if no match was found.
-    """
-    if 'credsStore' in authconfig:
-        log.debug(
-            'Using credentials store "{0}"'.format(authconfig['credsStore'])
-        )
-        return _resolve_authconfig_credstore(
-            authconfig, registry, authconfig['credsStore']
-        )
-    # Default to the public index server
-    registry = resolve_index_name(registry) if registry else INDEX_NAME
-    log.debug("Looking for auth entry for {0}".format(repr(registry)))
-
-    if registry in authconfig:
-        log.debug("Found {0}".format(repr(registry)))
-        return authconfig[registry]
-
-    for key, config in six.iteritems(authconfig):
-        if resolve_index_name(key) == registry:
-            log.debug("Found {0}".format(repr(key)))
-            return config
-
-    log.debug("No entry found")
-    return None
-
-
-def _resolve_authconfig_credstore(authconfig, registry, credstore_name):
-    if not registry or registry == INDEX_NAME:
-        # The ecosystem is inconsistent about index.docker.io vs.
-        # docker.io - in that case, it seems the full URL is necessary.
-        registry = 'https://index.docker.io/v1/'
-    log.debug("Looking for auth entry for {0}".format(repr(registry)))
-    store = dockerpycreds.Store(credstore_name)
-    try:
-        data = store.get(registry)
-        res = {
-            'ServerAddress': registry,
-        }
-        if data['Username'] == TOKEN_USERNAME:
-            res['IdentityToken'] = data['Secret']
-        else:
-            res.update({
-                'Username': data['Username'],
-                'Password': data['Secret'],
-            })
-        return res
-    except dockerpycreds.CredentialsNotFound as e:
-        log.debug('No entry found')
-        return None
-    except dockerpycreds.StoreError as e:
-        raise errors.DockerException(
-            'Credentials store error: {0}'.format(repr(e))
-        )
-
-
-def convert_to_hostname(url):
-    return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
-
-
-def decode_auth(auth):
-    if isinstance(auth, six.string_types):
-        auth = auth.encode('ascii')
-    s = base64.b64decode(auth)
-    login, pwd = s.split(b':', 1)
-    return login.decode('utf8'), pwd.decode('utf8')
-
-
-def encode_header(auth):
-    auth_json = json.dumps(auth).encode('ascii')
-    return base64.urlsafe_b64encode(auth_json)
-
-
-def parse_auth(entries, raise_on_error=False):
-    """
-    Parses authentication entries
-
-    Args:
-      entries:        Dict of authentication entries.
-      raise_on_error: If set to true, an invalid format will raise
-                      InvalidConfigFile
-
-    Returns:
-      Authentication registry.
-    """
-
-    conf = {}
-    for registry, entry in six.iteritems(entries):
-        if not isinstance(entry, dict):
-            log.debug(
-                'Config entry for key {0} is not auth config'.format(registry)
-            )
-            # We sometimes fall back to parsing the whole config as if it was
-            # the auth config by itself, for legacy purposes. In that case, we
-            # fail silently and return an empty conf if any of the keys is not
-            # formatted properly.
-            if raise_on_error:
-                raise errors.InvalidConfigFile(
-                    'Invalid configuration for registry {0}'.format(registry)
-                )
-            return {}
-        if 'identitytoken' in entry:
-            log.debug('Found an IdentityToken entry for registry {0}'.format(
-                registry
-            ))
-            conf[registry] = {
-                'IdentityToken': entry['identitytoken']
-            }
-            continue  # Other values are irrelevant if we have a token, skip.
-
-        if 'auth' not in entry:
-            # Starting with engine v1.11 (API 1.23), an empty dictionary is
-            # a valid value in the auths config.
-            # https://github.com/docker/compose/issues/3265
-            log.debug(
-                'Auth data for {0} is absent. Client might be using a '
-                'credentials store instead.'.format(registry)
-            )
-            conf[registry] = {}
-            continue
-
-        username, password = decode_auth(entry['auth'])
-        log.debug(
-            'Found entry (registry={0}, username={1})'
-            .format(repr(registry), repr(username))
-        )
-
-        conf[registry] = {
-            'username': username,
-            'password': password,
-            'email': entry.get('email'),
-            'serveraddress': registry,
-        }
-    return conf
-
-
-def find_config_file(config_path=None):
-    environment_path = os.path.join(
-        os.environ.get('DOCKER_CONFIG'),
-        os.path.basename(DOCKER_CONFIG_FILENAME)
-    ) if os.environ.get('DOCKER_CONFIG') else None
-
-    paths = filter(None, [
-        config_path,  # 1
-        environment_path,  # 2
-        os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME),  # 3
-        os.path.join(
-            os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
-        )  # 4
-    ])
-
-    log.debug("Trying paths: {0}".format(repr(paths)))
-
-    for path in paths:
-        if os.path.exists(path):
-            log.debug("Found file at path: {0}".format(path))
-            return path
-
-    log.debug("No config file found")
-
-    return None
-
-
-def load_config(config_path=None):
-    """
-    Loads authentication data from a Docker configuration file in the given
-    root directory or if config_path is passed use given path.
-    Lookup priority:
-        explicit config_path parameter > DOCKER_CONFIG environment variable >
-        ~/.docker/config.json > ~/.dockercfg
-    """
-    config_file = find_config_file(config_path)
-
-    if not config_file:
-        return {}
-
-    try:
-        with open(config_file) as f:
-            data = json.load(f)
-            res = {}
-            if data.get('auths'):
-                log.debug("Found 'auths' section")
-                res.update(parse_auth(data['auths'], raise_on_error=True))
-            if data.get('HttpHeaders'):
-                log.debug("Found 'HttpHeaders' section")
-                res.update({'HttpHeaders': data['HttpHeaders']})
-            if data.get('credsStore'):
-                log.debug("Found 'credsStore' section")
-                res.update({'credsStore': data['credsStore']})
-            if res:
-                return res
-            else:
-                log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
-                f.seek(0)
-                return parse_auth(json.load(f))
-    except (IOError, KeyError, ValueError) as e:
-        # The new-style Docker config file is likely missing or in an
-        # unknown format; fall through and attempt the legacy location
-        # and format.
-        log.debug(e)
-
-    log.debug("Attempting to parse legacy auth file format")
-    try:
-        data = []
-        with open(config_file) as f:
-            for line in f.readlines():
-                data.append(line.strip().split(' = ')[1])
-            if len(data) < 2:
-                # Not enough data
-                raise errors.InvalidConfigFile(
-                    'Invalid or empty configuration file!'
-                )
-
-        username, password = decode_auth(data[0])
-        return {
-            INDEX_NAME: {
-                'username': username,
-                'password': password,
-                'email': data[1],
-                'serveraddress': INDEX_URL,
-            }
-        }
-    except Exception as e:
-        log.debug(e)
-
-    log.debug("All parsing attempts failed - returning empty config")
-    return {}
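
For reference, the `auth` blob that `parse_auth()` hands to `decode_auth()` above is plain base64 over `username:password`. A minimal standalone sketch of the round trip (the registry entry and credentials are made up):

import base64

# Hypothetical config.json 'auths' entry; base64("user:secret") is the
# same shape decode_auth() receives from parse_auth().
entry = {'auth': base64.b64encode(b'user:secret').decode('ascii'),
         'email': 'user@example.com'}

login, _, password = (
    base64.b64decode(entry['auth']).decode('utf-8').partition(':')
)
assert (login, password) == ('user', 'secret')
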
diff --git a/env2/lib/python2.7/site-packages/docker/client.py b/env2/lib/python2.7/site-packages/docker/client.py
deleted file mode 100644
index 3fa19e0..0000000
--- a/env2/lib/python2.7/site-packages/docker/client.py
+++ /dev/null
@@ -1,406 +0,0 @@
-import json
-import struct
-from functools import partial
-
-import requests
-import requests.exceptions
-import six
-import websocket
-
-
-from . import api
-from . import constants
-from . import errors
-from .auth import auth
-from .ssladapter import ssladapter
-from .tls import TLSConfig
-from .transport import UnixAdapter
-from .utils import utils, check_resource, update_headers, kwargs_from_env
-from .utils.socket import frames_iter
-try:
-    from .transport import NpipeAdapter
-except ImportError:
-    pass
-
-
-def from_env(**kwargs):
-    return Client.from_env(**kwargs)
-
-
-class Client(
-        requests.Session,
-        api.BuildApiMixin,
-        api.ContainerApiMixin,
-        api.DaemonApiMixin,
-        api.ExecApiMixin,
-        api.ImageApiMixin,
-        api.NetworkApiMixin,
-        api.ServiceApiMixin,
-        api.SwarmApiMixin,
-        api.VolumeApiMixin):
-    def __init__(self, base_url=None, version=None,
-                 timeout=constants.DEFAULT_TIMEOUT_SECONDS, tls=False,
-                 user_agent=constants.DEFAULT_USER_AGENT,
-                 num_pools=constants.DEFAULT_NUM_POOLS):
-        super(Client, self).__init__()
-
-        if tls and not base_url:
-            raise errors.TLSParameterError(
-                'If using TLS, the base_url argument must be provided.'
-            )
-
-        self.base_url = base_url
-        self.timeout = timeout
-        self.headers['User-Agent'] = user_agent
-
-        self._auth_configs = auth.load_config()
-
-        base_url = utils.parse_host(
-            base_url, constants.IS_WINDOWS_PLATFORM, tls=bool(tls)
-        )
-        if base_url.startswith('http+unix://'):
-            self._custom_adapter = UnixAdapter(
-                base_url, timeout, num_pools=num_pools
-            )
-            self.mount('http+docker://', self._custom_adapter)
-            self._unmount('http://', 'https://')
-            self.base_url = 'http+docker://localunixsocket'
-        elif base_url.startswith('npipe://'):
-            if not constants.IS_WINDOWS_PLATFORM:
-                raise errors.DockerException(
-                    'The npipe:// protocol is only supported on Windows'
-                )
-            try:
-                self._custom_adapter = NpipeAdapter(
-                    base_url, timeout, num_pools=num_pools
-                )
-            except NameError:
-                raise errors.DockerException(
-                    'Install pypiwin32 package to enable npipe:// support'
-                )
-            self.mount('http+docker://', self._custom_adapter)
-            self.base_url = 'http+docker://localnpipe'
-        else:
-            # Use SSLAdapter for the ability to specify SSL version
-            if isinstance(tls, TLSConfig):
-                tls.configure_client(self)
-            elif tls:
-                self._custom_adapter = ssladapter.SSLAdapter(
-                    pool_connections=num_pools
-                )
-                self.mount('https://', self._custom_adapter)
-            self.base_url = base_url
-
-        # version detection needs to be after unix adapter mounting
-        if version is None:
-            self._version = constants.DEFAULT_DOCKER_API_VERSION
-        elif isinstance(version, six.string_types):
-            if version.lower() == 'auto':
-                self._version = self._retrieve_server_version()
-            else:
-                self._version = version
-        else:
-            raise errors.DockerException(
-                'Version parameter must be a string or None. Found {0}'.format(
-                    type(version).__name__
-                )
-            )
-
-    @classmethod
-    def from_env(cls, **kwargs):
-        version = kwargs.pop('version', None)
-        return cls(version=version, **kwargs_from_env(**kwargs))
-
-    def _retrieve_server_version(self):
-        try:
-            return self.version(api_version=False)["ApiVersion"]
-        except KeyError:
-            raise errors.DockerException(
-                'Invalid response from docker daemon: key "ApiVersion"'
-                ' is missing.'
-            )
-        except Exception as e:
-            raise errors.DockerException(
-                'Error while fetching server API version: {0}'.format(e)
-            )
-
-    def _set_request_timeout(self, kwargs):
-        """Prepare the kwargs for an HTTP request by inserting the timeout
-        parameter, if not already present."""
-        kwargs.setdefault('timeout', self.timeout)
-        return kwargs
-
-    @update_headers
-    def _post(self, url, **kwargs):
-        return self.post(url, **self._set_request_timeout(kwargs))
-
-    @update_headers
-    def _get(self, url, **kwargs):
-        return self.get(url, **self._set_request_timeout(kwargs))
-
-    @update_headers
-    def _put(self, url, **kwargs):
-        return self.put(url, **self._set_request_timeout(kwargs))
-
-    @update_headers
-    def _delete(self, url, **kwargs):
-        return self.delete(url, **self._set_request_timeout(kwargs))
-
-    def _url(self, pathfmt, *args, **kwargs):
-        for arg in args:
-            if not isinstance(arg, six.string_types):
-                raise ValueError(
-                    'Expected a string but found {0} ({1}) '
-                    'instead'.format(arg, type(arg))
-                )
-
-        quote_f = partial(six.moves.urllib.parse.quote_plus, safe="/:")
-        args = map(quote_f, args)
-
-        if kwargs.get('versioned_api', True):
-            return '{0}/v{1}{2}'.format(
-                self.base_url, self._version, pathfmt.format(*args)
-            )
-        else:
-            return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
-
-    def _raise_for_status(self, response, explanation=None):
-        """Raises stored :class:`APIError`, if one occurred."""
-        try:
-            response.raise_for_status()
-        except requests.exceptions.HTTPError as e:
-            if e.response.status_code == 404:
-                raise errors.NotFound(e, response, explanation=explanation)
-            raise errors.APIError(e, response, explanation=explanation)
-
-    def _result(self, response, json=False, binary=False):
-        assert not (json and binary)
-        self._raise_for_status(response)
-
-        if json:
-            return response.json()
-        if binary:
-            return response.content
-        return response.text
-
-    def _post_json(self, url, data, **kwargs):
-        # Go <1.1 can't deserialize null to a string,
-        # so we strip None-valued keys here.
-        data2 = {}
-        if data is not None:
-            for k, v in six.iteritems(data):
-                if v is not None:
-                    data2[k] = v
-
-        if 'headers' not in kwargs:
-            kwargs['headers'] = {}
-        kwargs['headers']['Content-Type'] = 'application/json'
-        return self._post(url, data=json.dumps(data2), **kwargs)
-
-    def _attach_params(self, override=None):
-        return override or {
-            'stdout': 1,
-            'stderr': 1,
-            'stream': 1
-        }
-
-    @check_resource
-    def _attach_websocket(self, container, params=None):
-        url = self._url("/containers/{0}/attach/ws", container)
-        req = requests.Request("POST", url, params=self._attach_params(params))
-        full_url = req.prepare().url
-        full_url = full_url.replace("http://", "ws://", 1)
-        full_url = full_url.replace("https://", "wss://", 1)
-        return self._create_websocket_connection(full_url)
-
-    def _create_websocket_connection(self, url):
-        return websocket.create_connection(url)
-
-    def _get_raw_response_socket(self, response):
-        self._raise_for_status(response)
-        if self.base_url == "http+docker://localnpipe":
-            sock = response.raw._fp.fp.raw.sock
-        elif six.PY3:
-            sock = response.raw._fp.fp.raw
-            if self.base_url.startswith("https://"):
-                sock = sock._sock
-        else:
-            sock = response.raw._fp.fp._sock
-        try:
-            # Keep a reference to the response to stop it being garbage
-            # collected. If the response is garbage collected, it will
-            # close TLS sockets.
-            sock._response = response
-        except AttributeError:
-            # UNIX sockets can't have attributes set on them, but that's
-            # fine because we won't be doing TLS over them
-            pass
-
-        return sock
-
-    def _stream_helper(self, response, decode=False):
-        """Generator for data coming from a chunked-encoded HTTP response."""
-        if response.raw._fp.chunked:
-            reader = response.raw
-            while not reader.closed:
-                # this read call will block until we get a chunk
-                data = reader.read(1)
-                if not data:
-                    break
-                if reader._fp.chunk_left:
-                    data += reader.read(reader._fp.chunk_left)
-                if decode:
-                    if six.PY3:
-                        data = data.decode('utf-8')
-                    # remove the trailing newline
-                    data = data.strip()
-                    # split the data at any newlines
-                    data_list = data.split("\r\n")
-                    # load and yield each line separately
-                    for data in data_list:
-                        data = json.loads(data)
-                        yield data
-                else:
-                    yield data
-        else:
-            # Response isn't chunked, meaning we probably
-            # encountered an error immediately
-            yield self._result(response, json=decode)
-
-    def _multiplexed_buffer_helper(self, response):
-        """A generator of multiplexed data blocks read from a buffered
-        response."""
-        buf = self._result(response, binary=True)
-        walker = 0
-        while True:
-            if len(buf[walker:]) < 8:
-                break
-            _, length = struct.unpack_from('>BxxxL', buf[walker:])
-            start = walker + constants.STREAM_HEADER_SIZE_BYTES
-            end = start + length
-            walker = end
-            yield buf[start:end]
-
-    def _multiplexed_response_stream_helper(self, response):
-        """A generator of multiplexed data blocks coming from a response
-        stream."""
-
-        # Disable timeout on the underlying socket to prevent
-        # Read timed out(s) for long running processes
-        socket = self._get_raw_response_socket(response)
-        self._disable_socket_timeout(socket)
-
-        while True:
-            header = response.raw.read(constants.STREAM_HEADER_SIZE_BYTES)
-            if not header:
-                break
-            _, length = struct.unpack('>BxxxL', header)
-            if not length:
-                continue
-            data = response.raw.read(length)
-            if not data:
-                break
-            yield data
-
-    def _stream_raw_result_old(self, response):
-        ''' Stream raw output for API versions below 1.6 '''
-        self._raise_for_status(response)
-        for line in response.iter_lines(chunk_size=1,
-                                        decode_unicode=True):
-            # filter out keep-alive new lines
-            if line:
-                yield line
-
-    def _stream_raw_result(self, response):
-        ''' Stream result for TTY-enabled container above API 1.6 '''
-        self._raise_for_status(response)
-        for out in response.iter_content(chunk_size=1, decode_unicode=True):
-            yield out
-
-    def _read_from_socket(self, response, stream):
-        socket = self._get_raw_response_socket(response)
-
-        if stream:
-            return frames_iter(socket)
-        else:
-            return six.binary_type().join(frames_iter(socket))
-
-    def _disable_socket_timeout(self, socket):
-        """ Depending on the combination of python version and whether we're
-        connecting over http or https, we might need to access _sock, which
-        may or may not exist; or we may need to just settimeout on socket
-        itself, which also may or may not have settimeout on it. To avoid
-        missing the correct one, we try both.
-
-        We also do not want to set the timeout if it is already disabled, as
-        you run the risk of changing a socket that was non-blocking to
-        blocking, for example when using gevent.
-        """
-        sockets = [socket, getattr(socket, '_sock', None)]
-
-        for s in sockets:
-            if not hasattr(s, 'settimeout'):
-                continue
-
-            timeout = -1
-
-            if hasattr(s, 'gettimeout'):
-                timeout = s.gettimeout()
-
-            # Don't change the timeout if it is already disabled.
-            if timeout is None or timeout == 0.0:
-                continue
-
-            s.settimeout(None)
-
-    def _get_result(self, container, stream, res):
-        cont = self.inspect_container(container)
-        return self._get_result_tty(stream, res, cont['Config']['Tty'])
-
-    def _get_result_tty(self, stream, res, is_tty):
-        # Stream multiplexing was only introduced in API v1.6. Anything
-        # before that needs old-style streaming.
-        if utils.compare_version('1.6', self._version) < 0:
-            return self._stream_raw_result_old(res)
-
-        # We should also use raw streaming (without keep-alives)
-        # if we're dealing with a tty-enabled container.
-        if is_tty:
-            return self._stream_raw_result(res) if stream else \
-                self._result(res, binary=True)
-
-        self._raise_for_status(res)
-        sep = six.binary_type()
-        if stream:
-            return self._multiplexed_response_stream_helper(res)
-        else:
-            return sep.join(
-                [x for x in self._multiplexed_buffer_helper(res)]
-            )
-
-    def _unmount(self, *args):
-        for proto in args:
-            self.adapters.pop(proto)
-
-    def get_adapter(self, url):
-        try:
-            return super(Client, self).get_adapter(url)
-        except requests.exceptions.InvalidSchema as e:
-            if self._custom_adapter:
-                return self._custom_adapter
-            else:
-                raise e
-
-    @property
-    def api_version(self):
-        return self._version
-
-
-class AutoVersionClient(Client):
-    def __init__(self, *args, **kwargs):
-        if 'version' in kwargs and kwargs['version']:
-            raise errors.DockerException(
-                'Can not specify version for AutoVersionClient'
-            )
-        kwargs['version'] = 'auto'
-        super(AutoVersionClient, self).__init__(*args, **kwargs)
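
A usage sketch for the deleted client, assuming the docker-py 1.x layout vendored here and a daemon reachable on the default UNIX socket; `version='auto'` is exactly what `AutoVersionClient` hard-codes:

from docker import Client  # docker-py 1.x API, as vendored above

# Negotiates the API version at startup via _retrieve_server_version().
cli = Client(base_url='unix://var/run/docker.sock', version='auto')
print(cli.api_version)           # property defined on Client
print(cli.version()['Version'])  # daemon version, via DaemonApiMixin
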
diff --git a/env2/lib/python2.7/site-packages/docker/constants.py b/env2/lib/python2.7/site-packages/docker/constants.py
deleted file mode 100644
index 0c9a020..0000000
--- a/env2/lib/python2.7/site-packages/docker/constants.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import sys
-from .version import version
-
-DEFAULT_DOCKER_API_VERSION = '1.24'
-DEFAULT_TIMEOUT_SECONDS = 60
-STREAM_HEADER_SIZE_BYTES = 8
-CONTAINER_LIMITS_KEYS = [
-    'memory', 'memswap', 'cpushares', 'cpusetcpus'
-]
-
-INSECURE_REGISTRY_DEPRECATION_WARNING = \
-    'The `insecure_registry` argument to {} ' \
-    'is deprecated and non-functional. Please remove it.'
-
-IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
-
-DEFAULT_USER_AGENT = "docker-py/{0}".format(version)
-DEFAULT_NUM_POOLS = 25
diff --git a/env2/lib/python2.7/site-packages/docker/errors.py b/env2/lib/python2.7/site-packages/docker/errors.py
deleted file mode 100644
index 97be802..0000000
--- a/env2/lib/python2.7/site-packages/docker/errors.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import requests
-
-
-class APIError(requests.exceptions.HTTPError):
-    def __init__(self, message, response, explanation=None):
-        # requests 1.2 supports response as a keyword argument, but
-        # requests 1.1 doesn't
-        super(APIError, self).__init__(message)
-        self.response = response
-
-        self.explanation = explanation
-
-        if self.explanation is None and response.content:
-            self.explanation = response.content.strip()
-
-    def __str__(self):
-        message = super(APIError, self).__str__()
-
-        if self.is_client_error():
-            message = '{0} Client Error: {1}'.format(
-                self.response.status_code, self.response.reason)
-
-        elif self.is_server_error():
-            message = '{0} Server Error: {1}'.format(
-                self.response.status_code, self.response.reason)
-
-        if self.explanation:
-            message = '{0} ("{1}")'.format(message, self.explanation)
-
-        return message
-
-    def is_client_error(self):
-        return 400 <= self.response.status_code < 500
-
-    def is_server_error(self):
-        return 500 <= self.response.status_code < 600
-
-
-class DockerException(Exception):
-    pass
-
-
-class NotFound(APIError):
-    pass
-
-
-class InvalidVersion(DockerException):
-    pass
-
-
-class InvalidRepository(DockerException):
-    pass
-
-
-class InvalidConfigFile(DockerException):
-    pass
-
-
-class DeprecatedMethod(DockerException):
-    pass
-
-
-class TLSParameterError(DockerException):
-    def __init__(self, msg):
-        self.msg = msg
-
-    def __str__(self):
-        return self.msg + (". TLS configurations should map the Docker CLI "
-                           "client configurations. See "
-                           "https://docs.docker.com/engine/articles/https/ "
-                           "for API details.")
-
-
-class NullResource(DockerException, ValueError):
-    pass
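
A small sketch of how these exceptions render, using a hand-built response (all values made up):

import requests
from docker import errors  # the module deleted above

resp = requests.Response()
resp.status_code = 404
resp.reason = 'Not Found'

err = errors.NotFound('boom', resp, explanation='No such container: abc123')
assert err.is_client_error() and not err.is_server_error()
print(err)  # 404 Client Error: Not Found ("No such container: abc123")
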
diff --git a/env2/lib/python2.7/site-packages/docker/ssladapter/__init__.py b/env2/lib/python2.7/site-packages/docker/ssladapter/__init__.py
deleted file mode 100644
index 1a5e1bb..0000000
--- a/env2/lib/python2.7/site-packages/docker/ssladapter/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .ssladapter import SSLAdapter # flake8: noqa
diff --git a/env2/lib/python2.7/site-packages/docker/ssladapter/ssladapter.py b/env2/lib/python2.7/site-packages/docker/ssladapter/ssladapter.py
deleted file mode 100644
index e17dfad..0000000
--- a/env2/lib/python2.7/site-packages/docker/ssladapter/ssladapter.py
+++ /dev/null
@@ -1,66 +0,0 @@
-""" Resolves OpenSSL issues in some servers:
-      https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
-      https://github.com/kennethreitz/requests/pull/799
-"""
-import sys
-
-from distutils.version import StrictVersion
-from requests.adapters import HTTPAdapter
-
-try:
-    import requests.packages.urllib3 as urllib3
-except ImportError:
-    import urllib3
-
-
-PoolManager = urllib3.poolmanager.PoolManager
-
-# Monkey-patching match_hostname with a version that supports
-# IP-address checking. Not necessary for Python 3.5 and above
-if sys.version_info[0] < 3 or sys.version_info[1] < 5:
-    from backports.ssl_match_hostname import match_hostname
-    urllib3.connection.match_hostname = match_hostname
-
-
-class SSLAdapter(HTTPAdapter):
-    '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
-    def __init__(self, ssl_version=None, assert_hostname=None,
-                 assert_fingerprint=None, **kwargs):
-        self.ssl_version = ssl_version
-        self.assert_hostname = assert_hostname
-        self.assert_fingerprint = assert_fingerprint
-        super(SSLAdapter, self).__init__(**kwargs)
-
-    def init_poolmanager(self, connections, maxsize, block=False):
-        kwargs = {
-            'num_pools': connections,
-            'maxsize': maxsize,
-            'block': block,
-            'assert_hostname': self.assert_hostname,
-            'assert_fingerprint': self.assert_fingerprint,
-        }
-        if self.ssl_version and self.can_override_ssl_version():
-            kwargs['ssl_version'] = self.ssl_version
-
-        self.poolmanager = PoolManager(**kwargs)
-
-    def get_connection(self, *args, **kwargs):
-        """
-        Ensure assert_hostname is set correctly on our pool
-
-        We already take care of a normal poolmanager via init_poolmanager
-
-        But we still need to take care of when there is a proxy poolmanager
-        """
-        conn = super(SSLAdapter, self).get_connection(*args, **kwargs)
-        if conn.assert_hostname != self.assert_hostname:
-            conn.assert_hostname = self.assert_hostname
-        return conn
-
-    def can_override_ssl_version(self):
-        urllib_ver = urllib3.__version__.split('-')[0]
-        if urllib_ver is None:
-            return False
-        if urllib_ver == 'dev':
-            return True
-        return StrictVersion(urllib_ver) > StrictVersion('1.5')
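
Since SSLAdapter is an ordinary requests transport adapter, it can be exercised outside the Docker client too. A sketch, using a placeholder host and the same TLSv1 default that TLSConfig (below) falls back to:

import ssl
import requests
from docker.ssladapter.ssladapter import SSLAdapter  # module shown above

session = requests.Session()
session.mount('https://', SSLAdapter(ssl_version=ssl.PROTOCOL_TLSv1))
# session.get('https://docker.example.com:2376/_ping')  # placeholder host
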
diff --git a/env2/lib/python2.7/site-packages/docker/tls.py b/env2/lib/python2.7/site-packages/docker/tls.py
deleted file mode 100644
index 7abfa60..0000000
--- a/env2/lib/python2.7/site-packages/docker/tls.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import os
-import ssl
-
-from . import errors
-from .ssladapter import ssladapter
-
-
-class TLSConfig(object):
-    cert = None
-    ca_cert = None
-    verify = None
-    ssl_version = None
-
-    def __init__(self, client_cert=None, ca_cert=None, verify=None,
-                 ssl_version=None, assert_hostname=None,
-                 assert_fingerprint=None):
-        # Argument compatibility/mapping with
-        # https://docs.docker.com/engine/articles/https/
-        # This diverges from the Docker CLI in that users can specify 'tls'
-        # here, but also disable any public/default CA pool verification by
-        # leaving tls_verify=False
-
-        self.assert_hostname = assert_hostname
-        self.assert_fingerprint = assert_fingerprint
-
-        # TLS v1.0 seems to be the safest default; SSLv23 fails in mysterious
-        # ways: https://github.com/docker/docker-py/issues/963
-
-        self.ssl_version = ssl_version or ssl.PROTOCOL_TLSv1
-
-        # "tls" and "tls_verify" must have both or neither cert/key files
-        # In either case, Alert the user when both are expected, but any are
-        # missing.
-
-        if client_cert:
-            try:
-                tls_cert, tls_key = client_cert
-            except ValueError:
-                raise errors.TLSParameterError(
-                    'client_config must be a tuple of'
-                    ' (client certificate, key file)'
-                )
-
-            if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
-               not os.path.isfile(tls_key)):
-                raise errors.TLSParameterError(
-                    'Path to a certificate and key files must be provided'
-                    ' through the client_config param'
-                )
-            self.cert = (tls_cert, tls_key)
-
-        # If verify is set, make sure the cert exists
-        self.verify = verify
-        self.ca_cert = ca_cert
-        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
-            raise errors.TLSParameterError(
-                'Invalid CA certificate provided for `tls_ca_cert`.'
-            )
-
-    def configure_client(self, client):
-        client.ssl_version = self.ssl_version
-
-        if self.verify and self.ca_cert:
-            client.verify = self.ca_cert
-        else:
-            client.verify = self.verify
-
-        if self.cert:
-            client.cert = self.cert
-
-        client.mount('https://', ssladapter.SSLAdapter(
-            ssl_version=self.ssl_version,
-            assert_hostname=self.assert_hostname,
-            assert_fingerprint=self.assert_fingerprint,
-        ))
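
A sketch of wiring TLSConfig into the client; the certificate paths are placeholders and must exist on disk, or the constructor raises TLSParameterError as shown above:

from docker import Client
from docker.tls import TLSConfig

tls_config = TLSConfig(
    client_cert=('/certs/cert.pem', '/certs/key.pem'),  # placeholder paths
    ca_cert='/certs/ca.pem',
    verify=True,
)
cli = Client(base_url='https://docker.example.com:2376', tls=tls_config)
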
diff --git a/env2/lib/python2.7/site-packages/docker/transport/__init__.py b/env2/lib/python2.7/site-packages/docker/transport/__init__.py
deleted file mode 100644
index 46dfdf8..0000000
--- a/env2/lib/python2.7/site-packages/docker/transport/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# flake8: noqa
-from .unixconn import UnixAdapter
-try:
-    from .npipeconn import NpipeAdapter
-    from .npipesocket import NpipeSocket
-except ImportError:
-    pass
\ No newline at end of file
diff --git a/env2/lib/python2.7/site-packages/docker/transport/npipeconn.py b/env2/lib/python2.7/site-packages/docker/transport/npipeconn.py
deleted file mode 100644
index 017738e..0000000
--- a/env2/lib/python2.7/site-packages/docker/transport/npipeconn.py
+++ /dev/null
@@ -1,103 +0,0 @@
-import six
-import requests.adapters
-
-from .. import constants
-from .npipesocket import NpipeSocket
-
-if six.PY3:
-    import http.client as httplib
-else:
-    import httplib
-
-try:
-    import requests.packages.urllib3 as urllib3
-except ImportError:
-    import urllib3
-
-RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-
-
-class NpipeHTTPConnection(httplib.HTTPConnection, object):
-    def __init__(self, npipe_path, timeout=60):
-        super(NpipeHTTPConnection, self).__init__(
-            'localhost', timeout=timeout
-        )
-        self.npipe_path = npipe_path
-        self.timeout = timeout
-
-    def connect(self):
-        sock = NpipeSocket()
-        sock.settimeout(self.timeout)
-        sock.connect(self.npipe_path)
-        self.sock = sock
-
-
-class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
-    def __init__(self, npipe_path, timeout=60, maxsize=10):
-        super(NpipeHTTPConnectionPool, self).__init__(
-            'localhost', timeout=timeout, maxsize=maxsize
-        )
-        self.npipe_path = npipe_path
-        self.timeout = timeout
-
-    def _new_conn(self):
-        return NpipeHTTPConnection(
-            self.npipe_path, self.timeout
-        )
-
-    # When re-using connections, urllib3 tries to call select() on our
-    # NpipeSocket instance, causing a crash. To circumvent this, we override
-    # _get_conn, where that check happens.
-    def _get_conn(self, timeout):
-        conn = None
-        try:
-            conn = self.pool.get(block=self.block, timeout=timeout)
-
-        except AttributeError:  # self.pool is None
-            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
-
-        except six.moves.queue.Empty:
-            if self.block:
-                raise urllib3.exceptions.EmptyPoolError(
-                    self,
-                    "Pool reached maximum size and no more "
-                    "connections are allowed."
-                )
-            pass  # Oh well, we'll create a new connection then
-
-        return conn or self._new_conn()
-
-
-class NpipeAdapter(requests.adapters.HTTPAdapter):
-    def __init__(self, base_url, timeout=60,
-                 num_pools=constants.DEFAULT_NUM_POOLS):
-        self.npipe_path = base_url.replace('npipe://', '')
-        self.timeout = timeout
-        self.pools = RecentlyUsedContainer(
-            num_pools, dispose_func=lambda p: p.close()
-        )
-        super(NpipeAdapter, self).__init__()
-
-    def get_connection(self, url, proxies=None):
-        with self.pools.lock:
-            pool = self.pools.get(url)
-            if pool:
-                return pool
-
-            pool = NpipeHTTPConnectionPool(
-                self.npipe_path, self.timeout
-            )
-            self.pools[url] = pool
-
-        return pool
-
-    def request_url(self, request, proxies):
-        # The select_proxy utility in requests errors out when the provided
-        # URL doesn't have a hostname, as is the case when using a named
-        # pipe. Since proxies are an irrelevant notion with named pipes
-        # anyway, we simply return the path URL directly.
-        # See also: https://github.com/docker/docker-py/issues/811
-        return request.path_url
-
-    def close(self):
-        self.pools.clear()
diff --git a/env2/lib/python2.7/site-packages/docker/transport/npipesocket.py b/env2/lib/python2.7/site-packages/docker/transport/npipesocket.py
deleted file mode 100644
index 3b1b644..0000000
--- a/env2/lib/python2.7/site-packages/docker/transport/npipesocket.py
+++ /dev/null
@@ -1,218 +0,0 @@
-import functools
-import io
-
-import six
-import win32file
-import win32pipe
-
-cERROR_PIPE_BUSY = 0xe7
-cSECURITY_SQOS_PRESENT = 0x100000
-cSECURITY_ANONYMOUS = 0
-
-RETRY_WAIT_TIMEOUT = 10000
-
-
-def check_closed(f):
-    @functools.wraps(f)
-    def wrapped(self, *args, **kwargs):
-        if self._closed:
-            raise RuntimeError(
-                'Can not reuse socket after connection was closed.'
-            )
-        return f(self, *args, **kwargs)
-    return wrapped
-
-
-class NpipeSocket(object):
-    """ Partial implementation of the socket API over windows named pipes.
-        This implementation is only designed to be used as a client socket,
-        and server-specific methods (bind, listen, accept...) are not
-        implemented.
-    """
-    def __init__(self, handle=None):
-        self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
-        self._handle = handle
-        self._closed = False
-
-    def accept(self):
-        raise NotImplementedError()
-
-    def bind(self, address):
-        raise NotImplementedError()
-
-    def close(self):
-        self._handle.Close()
-        self._closed = True
-
-    @check_closed
-    def connect(self, address):
-        win32pipe.WaitNamedPipe(address, self._timeout)
-        try:
-            handle = win32file.CreateFile(
-                address,
-                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
-                0,
-                None,
-                win32file.OPEN_EXISTING,
-                cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
-                0
-            )
-        except win32pipe.error as e:
-            # See Remarks:
-            # https://msdn.microsoft.com/en-us/library/aa365800.aspx
-            if e.winerror == cERROR_PIPE_BUSY:
-                # Another program or thread has grabbed our pipe instance
-                # before we got to it. Wait for availability and attempt to
-                # connect again.
-                win32pipe.WaitNamedPipe(address, RETRY_WAIT_TIMEOUT)
-                return self.connect(address)
-            raise e
-
-        self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
-
-        self._handle = handle
-        self._address = address
-
-    @check_closed
-    def connect_ex(self, address):
-        return self.connect(address)
-
-    @check_closed
-    def detach(self):
-        self._closed = True
-        return self._handle
-
-    @check_closed
-    def dup(self):
-        return NpipeSocket(self._handle)
-
-    @check_closed
-    def fileno(self):
-        return int(self._handle)
-
-    def getpeername(self):
-        return self._address
-
-    def getsockname(self):
-        return self._address
-
-    def getsockopt(self, level, optname, buflen=None):
-        raise NotImplementedError()
-
-    def ioctl(self, control, option):
-        raise NotImplementedError()
-
-    def listen(self, backlog):
-        raise NotImplementedError()
-
-    def makefile(self, mode=None, bufsize=None):
-        if mode.strip('b') != 'r':
-            raise NotImplementedError()
-        rawio = NpipeFileIOBase(self)
-        if bufsize is None or bufsize <= 0:
-            bufsize = io.DEFAULT_BUFFER_SIZE
-        return io.BufferedReader(rawio, buffer_size=bufsize)
-
-    @check_closed
-    def recv(self, bufsize, flags=0):
-        err, data = win32file.ReadFile(self._handle, bufsize)
-        return data
-
-    @check_closed
-    def recvfrom(self, bufsize, flags=0):
-        data = self.recv(bufsize, flags)
-        return (data, self._address)
-
-    @check_closed
-    def recvfrom_into(self, buf, nbytes=0, flags=0):
-        return self.recv_into(buf, nbytes, flags), self._address
-
-    @check_closed
-    def recv_into(self, buf, nbytes=0):
-        if six.PY2:
-            return self._recv_into_py2(buf, nbytes)
-
-        readbuf = buf
-        if not isinstance(buf, memoryview):
-            readbuf = memoryview(buf)
-
-        err, data = win32file.ReadFile(
-            self._handle,
-            readbuf[:nbytes] if nbytes else readbuf
-        )
-        return len(data)
-
-    def _recv_into_py2(self, buf, nbytes):
-        err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
-        n = len(data)
-        buf[:n] = data
-        return n
-
-    @check_closed
-    def send(self, string, flags=0):
-        err, nbytes = win32file.WriteFile(self._handle, string)
-        return nbytes
-
-    @check_closed
-    def sendall(self, string, flags=0):
-        return self.send(string, flags)
-
-    @check_closed
-    def sendto(self, string, address):
-        self.connect(address)
-        return self.send(string)
-
-    def setblocking(self, flag):
-        if flag:
-            return self.settimeout(None)
-        return self.settimeout(0)
-
-    def settimeout(self, value):
-        if value is None:
-            # Blocking mode
-            self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
-        elif not isinstance(value, (float, int)) or value < 0:
-            raise ValueError('Timeout value out of range')
-        elif value == 0:
-            # Non-blocking mode
-            self._timeout = win32pipe.NMPWAIT_NO_WAIT
-        else:
-            # Timeout mode - Value converted to milliseconds
-            self._timeout = value * 1000
-
-    def gettimeout(self):
-        return self._timeout
-
-    def setsockopt(self, level, optname, value):
-        raise NotImplementedError()
-
-    @check_closed
-    def shutdown(self, how):
-        return self.close()
-
-
-class NpipeFileIOBase(io.RawIOBase):
-    def __init__(self, npipe_socket):
-        self.sock = npipe_socket
-
-    def close(self):
-        super(NpipeFileIOBase, self).close()
-        self.sock = None
-
-    def fileno(self):
-        return self.sock.fileno()
-
-    def isatty(self):
-        return False
-
-    def readable(self):
-        return True
-
-    def readinto(self, buf):
-        return self.sock.recv_into(buf)
-
-    def seekable(self):
-        return False
-
-    def writable(self):
-        return False
diff --git a/env2/lib/python2.7/site-packages/docker/transport/unixconn.py b/env2/lib/python2.7/site-packages/docker/transport/unixconn.py
deleted file mode 100644
index b7905a0..0000000
--- a/env2/lib/python2.7/site-packages/docker/transport/unixconn.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import six
-import requests.adapters
-import socket
-
-from .. import constants
-
-if six.PY3:
-    import http.client as httplib
-else:
-    import httplib
-
-try:
-    import requests.packages.urllib3 as urllib3
-except ImportError:
-    import urllib3
-
-
-RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
-
-
-class UnixHTTPConnection(httplib.HTTPConnection, object):
-    def __init__(self, base_url, unix_socket, timeout=60):
-        super(UnixHTTPConnection, self).__init__(
-            'localhost', timeout=timeout
-        )
-        self.base_url = base_url
-        self.unix_socket = unix_socket
-        self.timeout = timeout
-
-    def connect(self):
-        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        sock.settimeout(self.timeout)
-        sock.connect(self.unix_socket)
-        self.sock = sock
-
-
-class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
-    def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
-        super(UnixHTTPConnectionPool, self).__init__(
-            'localhost', timeout=timeout, maxsize=maxsize
-        )
-        self.base_url = base_url
-        self.socket_path = socket_path
-        self.timeout = timeout
-
-    def _new_conn(self):
-        return UnixHTTPConnection(
-            self.base_url, self.socket_path, self.timeout
-        )
-
-
-class UnixAdapter(requests.adapters.HTTPAdapter):
-    def __init__(self, socket_url, timeout=60,
-                 num_pools=constants.DEFAULT_NUM_POOLS):
-        socket_path = socket_url.replace('http+unix://', '')
-        if not socket_path.startswith('/'):
-            socket_path = '/' + socket_path
-        self.socket_path = socket_path
-        self.timeout = timeout
-        self.pools = RecentlyUsedContainer(
-            num_pools, dispose_func=lambda p: p.close()
-        )
-        super(UnixAdapter, self).__init__()
-
-    def get_connection(self, url, proxies=None):
-        with self.pools.lock:
-            pool = self.pools.get(url)
-            if pool:
-                return pool
-
-            pool = UnixHTTPConnectionPool(
-                url, self.socket_path, self.timeout
-            )
-            self.pools[url] = pool
-
-        return pool
-
-    def request_url(self, request, proxies):
-        # The select_proxy utility in requests errors out when the provided URL
-        # doesn't have a hostname, as is the case when using a UNIX socket.
-        # Since proxies are an irrelevant notion in the case of UNIX sockets
-        # anyway, we simply return the path URL directly.
-        # See also: https://github.com/docker/docker-py/issues/811
-        return request.path_url
-
-    def close(self):
-        self.pools.clear()
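
UnixAdapter can be mounted on a bare requests session to talk to the daemon directly, which is what Client does internally. A sketch assuming the default socket path and API version 1.24 (the vendored default):

import requests
from docker.transport import UnixAdapter  # module shown above

session = requests.Session()
session.mount('http+docker://',
              UnixAdapter('http+unix://var/run/docker.sock', timeout=60))

# The hostname is a dummy: request_url() drops it and the adapter routes
# every request to the socket path instead.
resp = session.get('http+docker://localunixsocket/v1.24/_ping')
print(resp.status_code, resp.text)  # expect: 200 OK
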
diff --git a/env2/lib/python2.7/site-packages/docker/types/__init__.py b/env2/lib/python2.7/site-packages/docker/types/__init__.py
deleted file mode 100644
index 3609581..0000000
--- a/env2/lib/python2.7/site-packages/docker/types/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# flake8: noqa
-from .containers import LogConfig, Ulimit
-from .services import (
-    ContainerSpec, DriverConfig, Mount, Resources, RestartPolicy, TaskTemplate,
-    UpdateConfig
-)
-from .swarm import SwarmSpec, SwarmExternalCA
diff --git a/env2/lib/python2.7/site-packages/docker/types/base.py b/env2/lib/python2.7/site-packages/docker/types/base.py
deleted file mode 100644
index 6891062..0000000
--- a/env2/lib/python2.7/site-packages/docker/types/base.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import six
-
-
-class DictType(dict):
-    def __init__(self, init):
-        for k, v in six.iteritems(init):
-            self[k] = v
diff --git a/env2/lib/python2.7/site-packages/docker/types/containers.py b/env2/lib/python2.7/site-packages/docker/types/containers.py
deleted file mode 100644
index 40a44ca..0000000
--- a/env2/lib/python2.7/site-packages/docker/types/containers.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import six
-
-from .base import DictType
-
-
-class LogConfigTypesEnum(object):
-    _values = (
-        'json-file',
-        'syslog',
-        'journald',
-        'gelf',
-        'fluentd',
-        'none'
-    )
-    JSON, SYSLOG, JOURNALD, GELF, FLUENTD, NONE = _values
-
-
-class LogConfig(DictType):
-    types = LogConfigTypesEnum
-
-    def __init__(self, **kwargs):
-        log_driver_type = kwargs.get('type', kwargs.get('Type'))
-        config = kwargs.get('config', kwargs.get('Config')) or {}
-
-        if config and not isinstance(config, dict):
-            raise ValueError("LogConfig.config must be a dictionary")
-
-        super(LogConfig, self).__init__({
-            'Type': log_driver_type,
-            'Config': config
-        })
-
-    @property
-    def type(self):
-        return self['Type']
-
-    @type.setter
-    def type(self, value):
-        self['Type'] = value
-
-    @property
-    def config(self):
-        return self['Config']
-
-    def set_config_value(self, key, value):
-        self.config[key] = value
-
-    def unset_config(self, key):
-        if key in self.config:
-            del self.config[key]
-
-
-class Ulimit(DictType):
-    def __init__(self, **kwargs):
-        name = kwargs.get('name', kwargs.get('Name'))
-        soft = kwargs.get('soft', kwargs.get('Soft'))
-        hard = kwargs.get('hard', kwargs.get('Hard'))
-        if not isinstance(name, six.string_types):
-            raise ValueError("Ulimit.name must be a string")
-        if soft and not isinstance(soft, int):
-            raise ValueError("Ulimit.soft must be an integer")
-        if hard and not isinstance(hard, int):
-            raise ValueError("Ulimit.hard must be an integer")
-        super(Ulimit, self).__init__({
-            'Name': name,
-            'Soft': soft,
-            'Hard': hard
-        })
-
-    @property
-    def name(self):
-        return self['Name']
-
-    @name.setter
-    def name(self, value):
-        self['Name'] = value
-
-    @property
-    def soft(self):
-        return self.get('Soft')
-
-    @soft.setter
-    def soft(self, value):
-        self['Soft'] = value
-
-    @property
-    def hard(self):
-        return self.get('Hard')
-
-    @hard.setter
-    def hard(self, value):
-        self['Hard'] = value
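
Both helpers are thin dict subclasses shaped for the host-config payload. A quick sketch:

from docker.types import LogConfig, Ulimit  # modules shown above

log_cfg = LogConfig(type=LogConfig.types.JSON,
                    config={'max-size': '10m', 'max-file': '3'})
nofile = Ulimit(name='nofile', soft=1024, hard=2048)

print(log_cfg)  # {'Type': 'json-file', 'Config': {...}}
print(nofile)   # {'Name': 'nofile', 'Soft': 1024, 'Hard': 2048}
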
diff --git a/env2/lib/python2.7/site-packages/docker/types/services.py b/env2/lib/python2.7/site-packages/docker/types/services.py
deleted file mode 100644
index 063779c..0000000
--- a/env2/lib/python2.7/site-packages/docker/types/services.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import six
-
-from .. import errors
-
-
-class TaskTemplate(dict):
-    def __init__(self, container_spec, resources=None, restart_policy=None,
-                 placement=None, log_driver=None):
-        self['ContainerSpec'] = container_spec
-        if resources:
-            self['Resources'] = resources
-        if restart_policy:
-            self['RestartPolicy'] = restart_policy
-        if placement:
-            self['Placement'] = placement
-        if log_driver:
-            self['LogDriver'] = log_driver
-
-    @property
-    def container_spec(self):
-        return self.get('ContainerSpec')
-
-    @property
-    def resources(self):
-        return self.get('Resources')
-
-    @property
-    def restart_policy(self):
-        return self.get('RestartPolicy')
-
-    @property
-    def placement(self):
-        return self.get('Placement')
-
-
-class ContainerSpec(dict):
-    def __init__(self, image, command=None, args=None, env=None, workdir=None,
-                 user=None, labels=None, mounts=None, stop_grace_period=None):
-        from ..utils import split_command  # FIXME: circular import
-
-        self['Image'] = image
-
-        if isinstance(command, six.string_types):
-            command = split_command(command)
-        self['Command'] = command
-        self['Args'] = args
-
-        if env is not None:
-            self['Env'] = env
-        if workdir is not None:
-            self['Dir'] = workdir
-        if user is not None:
-            self['User'] = user
-        if labels is not None:
-            self['Labels'] = labels
-        if mounts is not None:
-            # Parse string mounts into Mount objects without mutating the
-            # list mid-iteration (which would skip entries).
-            self['Mounts'] = [
-                Mount.parse_mount_string(mount)
-                if isinstance(mount, six.string_types) else mount
-                for mount in mounts
-            ]
-        if stop_grace_period is not None:
-            self['StopGracePeriod'] = stop_grace_period
-
-
-class Mount(dict):
-    def __init__(self, target, source, type='volume', read_only=False,
-                 propagation=None, no_copy=False, labels=None,
-                 driver_config=None):
-        self['Target'] = target
-        self['Source'] = source
-        if type not in ('bind', 'volume'):
-            raise errors.DockerException(
-                'Only acceptable mount types are `bind` and `volume`.'
-            )
-        self['Type'] = type
-
-        if type == 'bind':
-            if propagation is not None:
-                self['BindOptions'] = {
-                    'Propagation': propagation
-                }
-            if any([labels, driver_config, no_copy]):
-                raise errors.DockerException(
-                    'Mount type is binding but volume options have been '
-                    'provided.'
-                )
-        else:
-            volume_opts = {}
-            if no_copy:
-                volume_opts['NoCopy'] = True
-            if labels:
-                volume_opts['Labels'] = labels
-            if driver_config:
-                volume_opts['driver_config'] = driver_config
-            if volume_opts:
-                self['VolumeOptions'] = volume_opts
-            if propagation:
-                raise errors.DockerException(
-                    'Mount type is volume but `propagation` argument has been '
-                    'provided.'
-                )
-
-    @classmethod
-    def parse_mount_string(cls, string):
-        parts = string.split(':')
-        if len(parts) > 3:
-            raise errors.DockerException(
-                'Invalid mount format "{0}"'.format(string)
-            )
-        if len(parts) == 1:
-            return cls(target=parts[0])
-        else:
-            target = parts[1]
-            source = parts[0]
-            # read-only only when an explicit 'ro' flag is the third field
-            read_only = len(parts) == 3 and parts[2] == 'ro'
-            return cls(target, source, read_only=read_only)
-
-
-class Resources(dict):
-    def __init__(self, cpu_limit=None, mem_limit=None, cpu_reservation=None,
-                 mem_reservation=None):
-        limits = {}
-        reservation = {}
-        if cpu_limit is not None:
-            limits['NanoCPUs'] = cpu_limit
-        if mem_limit is not None:
-            limits['MemoryBytes'] = mem_limit
-        if cpu_reservation is not None:
-            reservation['NanoCPUs'] = cpu_reservation
-        if mem_reservation is not None:
-            reservation['MemoryBytes'] = mem_reservation
-
-        if limits:
-            self['Limits'] = limits
-        if reservation:
-            self['Reservations'] = reservation
-
-
-class UpdateConfig(dict):
-    def __init__(self, parallelism=0, delay=None, failure_action='continue'):
-        self['Parallelism'] = parallelism
-        if delay is not None:
-            self['Delay'] = delay
-        if failure_action not in ('pause', 'continue'):
-            raise errors.DockerException(
-                'failure_action must be either `pause` or `continue`.'
-            )
-        self['FailureAction'] = failure_action
-
-
-class RestartConditionTypesEnum(object):
-    _values = (
-        'none',
-        'on-failure',
-        'any',
-    )
-    NONE, ON_FAILURE, ANY = _values
-
-
-class RestartPolicy(dict):
-    condition_types = RestartConditionTypesEnum
-
-    def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
-                 max_attempts=0, window=0):
-        if condition not in self.condition_types._values:
-            raise TypeError(
-                'Invalid RestartPolicy condition {0}'.format(condition)
-            )
-
-        self['Condition'] = condition
-        self['Delay'] = delay
-        self['MaxAttempts'] = max_attempts
-        self['Window'] = window
-
-
-class DriverConfig(dict):
-    def __init__(self, name, options=None):
-        self['Name'] = name
-        if options:
-            self['Options'] = options
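
A sketch of composing these service types the way the client's create_service call expects, with a made-up image and limits:

from docker.types import (
    ContainerSpec, Resources, RestartPolicy, TaskTemplate
)

task = TaskTemplate(
    container_spec=ContainerSpec(image='nginx:alpine', env=['FOO=bar']),
    resources=Resources(mem_limit=64 * 1024 * 1024),  # MemoryBytes
    restart_policy=RestartPolicy(condition='on-failure', max_attempts=3),
)
print(task['ContainerSpec']['Image'])  # nginx:alpine
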
diff --git a/env2/lib/python2.7/site-packages/docker/types/swarm.py b/env2/lib/python2.7/site-packages/docker/types/swarm.py
deleted file mode 100644
index 865fde6..0000000
--- a/env2/lib/python2.7/site-packages/docker/types/swarm.py
+++ /dev/null
@@ -1,40 +0,0 @@
-class SwarmSpec(dict):
-    def __init__(self, task_history_retention_limit=None,
-                 snapshot_interval=None, keep_old_snapshots=None,
-                 log_entries_for_slow_followers=None, heartbeat_tick=None,
-                 election_tick=None, dispatcher_heartbeat_period=None,
-                 node_cert_expiry=None, external_ca=None, name=None):
-        if task_history_retention_limit is not None:
-            self['Orchestration'] = {
-                'TaskHistoryRetentionLimit': task_history_retention_limit
-            }
-        if any([snapshot_interval, keep_old_snapshots,
-               log_entries_for_slow_followers, heartbeat_tick, election_tick]):
-            self['Raft'] = {
-                'SnapshotInterval': snapshot_interval,
-                'KeepOldSnapshots': keep_old_snapshots,
-                'LogEntriesForSlowFollowers': log_entries_for_slow_followers,
-                'HeartbeatTick': heartbeat_tick,
-                'ElectionTick': election_tick
-            }
-
-        if dispatcher_heartbeat_period:
-            self['Dispatcher'] = {
-                'HeartbeatPeriod': dispatcher_heartbeat_period
-            }
-
-        if node_cert_expiry or external_ca:
-            self['CAConfig'] = {
-                'NodeCertExpiry': node_cert_expiry,
-                'ExternalCA': external_ca
-            }
-
-        if name is not None:
-            self['Name'] = name
-
-
-class SwarmExternalCA(dict):
-    def __init__(self, url, protocol=None, options=None):
-        self['URL'] = url
-        self['Protocol'] = protocol
-        self['Options'] = options
diff --git a/env2/lib/python2.7/site-packages/docker/utils/__init__.py b/env2/lib/python2.7/site-packages/docker/utils/__init__.py
deleted file mode 100644
index 4bb3876..0000000
--- a/env2/lib/python2.7/site-packages/docker/utils/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# flake8: noqa
-from .utils import (
-    compare_version, convert_port_bindings, convert_volume_binds,
-    mkbuildcontext, tar, exclude_paths, parse_repository_tag, parse_host,
-    kwargs_from_env, convert_filters, datetime_to_timestamp,
-    create_host_config, create_container_config, parse_bytes, ping_registry,
-    parse_env_file, version_lt, version_gte, decode_json_header, split_command,
-    create_ipam_config, create_ipam_pool, parse_devices, normalize_links,
-)
-
-from ..types import LogConfig, Ulimit
-from ..types import SwarmExternalCA, SwarmSpec
-from .decorators import check_resource, minimum_version, update_headers
diff --git a/env2/lib/python2.7/site-packages/docker/utils/decorators.py b/env2/lib/python2.7/site-packages/docker/utils/decorators.py
deleted file mode 100644
index 2fe880c..0000000
--- a/env2/lib/python2.7/site-packages/docker/utils/decorators.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import functools
-
-from .. import errors
-from . import utils
-
-
-def check_resource(f):
-    @functools.wraps(f)
-    def wrapped(self, resource_id=None, *args, **kwargs):
-        if resource_id is None:
-            if kwargs.get('container'):
-                resource_id = kwargs.pop('container')
-            elif kwargs.get('image'):
-                resource_id = kwargs.pop('image')
-        if isinstance(resource_id, dict):
-            resource_id = resource_id.get('Id', resource_id.get('ID'))
-        if not resource_id:
-            raise errors.NullResource(
-                'image or container param is undefined'
-            )
-        return f(self, resource_id, *args, **kwargs)
-    return wrapped
-
-
-def minimum_version(version):
-    def decorator(f):
-        @functools.wraps(f)
-        def wrapper(self, *args, **kwargs):
-            if utils.version_lt(self._version, version):
-                raise errors.InvalidVersion(
-                    '{0} is not available for version < {1}'.format(
-                        f.__name__, version
-                    )
-                )
-            return f(self, *args, **kwargs)
-        return wrapper
-    return decorator
-
-
-def update_headers(f):
-    def inner(self, *args, **kwargs):
-        if 'HttpHeaders' in self._auth_configs:
-            if not kwargs.get('headers'):
-                kwargs['headers'] = self._auth_configs['HttpHeaders']
-            else:
-                kwargs['headers'].update(self._auth_configs['HttpHeaders'])
-        return f(self, *args, **kwargs)
-    return inner
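
A sketch of check_resource on a stub class, showing the three call shapes it normalizes (plain ID, inspect-style dict, and keyword argument):

from docker.utils import check_resource  # re-exported from .decorators

class Stub(object):
    @check_resource
    def remove(self, resource_id, force=False):
        return resource_id

s = Stub()
assert s.remove('abc123') == 'abc123'
assert s.remove({'Id': 'abc123'}) == 'abc123'    # dicts are unwrapped
assert s.remove(container='abc123') == 'abc123'  # keyword is popped
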
diff --git a/env2/lib/python2.7/site-packages/docker/utils/ports/__init__.py b/env2/lib/python2.7/site-packages/docker/utils/ports/__init__.py
deleted file mode 100644
index 1dbfa3a..0000000
--- a/env2/lib/python2.7/site-packages/docker/utils/ports/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .ports import (
-    split_port,
-    build_port_bindings
-) # flake8: noqa
diff --git a/env2/lib/python2.7/site-packages/docker/utils/ports/ports.py b/env2/lib/python2.7/site-packages/docker/utils/ports/ports.py
deleted file mode 100644
index 326ef94..0000000
--- a/env2/lib/python2.7/site-packages/docker/utils/ports/ports.py
+++ /dev/null
@@ -1,92 +0,0 @@
-
-def add_port_mapping(port_bindings, internal_port, external):
-    if internal_port in port_bindings:
-        port_bindings[internal_port].append(external)
-    else:
-        port_bindings[internal_port] = [external]
-
-
-def add_port(port_bindings, internal_port_range, external_range):
-    if external_range is None:
-        for internal_port in internal_port_range:
-            add_port_mapping(port_bindings, internal_port, None)
-    else:
-        ports = zip(internal_port_range, external_range)
-        for internal_port, external_port in ports:
-            add_port_mapping(port_bindings, internal_port, external_port)
-
-
-def build_port_bindings(ports):
-    port_bindings = {}
-    for port in ports:
-        internal_port_range, external_range = split_port(port)
-        add_port(port_bindings, internal_port_range, external_range)
-    return port_bindings
-
-
-def to_port_range(port):
-    if not port:
-        return None
-
-    protocol = ""
-    if "/" in port:
-        parts = port.split("/")
-        if len(parts) != 2:
-            _raise_invalid_port(port)
-
-        port, protocol = parts
-        protocol = "/" + protocol
-
-    parts = str(port).split('-')
-
-    if len(parts) == 1:
-        return ["%s%s" % (port, protocol)]
-
-    if len(parts) == 2:
-        full_port_range = range(int(parts[0]), int(parts[1]) + 1)
-        return ["%s%s" % (p, protocol) for p in full_port_range]
-
-    raise ValueError('Invalid port range "%s", should be '
-                     'port or startport-endport' % port)
-
-
-def _raise_invalid_port(port):
-    raise ValueError('Invalid port "%s", should be '
-                     '[[remote_ip:]remote_port[-remote_port]:]'
-                     'port[/protocol]' % port)
-
-
-def split_port(port):
-    parts = str(port).split(':')
-
-    if not 1 <= len(parts) <= 3:
-        _raise_invalid_port(port)
-
-    if len(parts) == 1:
-        internal_port, = parts
-        return to_port_range(internal_port), None
-    if len(parts) == 2:
-        external_port, internal_port = parts
-
-        internal_range = to_port_range(internal_port)
-        external_range = to_port_range(external_port)
-
-        if internal_range is None or external_range is None:
-            _raise_invalid_port(port)
-
-        if len(internal_range) != len(external_range):
-            raise ValueError('Port ranges don\'t match in length')
-
-        return internal_range, external_range
-
-    external_ip, external_port, internal_port = parts
-    internal_range = to_port_range(internal_port)
-    external_range = to_port_range(external_port)
-    if not external_range:
-        external_range = [None] * len(internal_range)
-
-    if len(internal_range) != len(external_range):
-        raise ValueError('Port ranges don\'t match in length')
-
-    return internal_range, [(external_ip, ex_port or None)
-                            for ex_port in external_range]
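
For reference, how the deleted port helpers behave, assuming docker-py 1.10.x is still importable (``split_port`` and ``build_port_bindings`` are re-exported by the package ``__init__`` shown above)::

    from docker.utils.ports import build_port_bindings, split_port

    print(split_port('80'))                 # (['80'], None)
    print(split_port('8080:80/tcp'))        # (['80/tcp'], ['8080'])
    print(split_port('127.0.0.1:8080:80'))  # (['80'], [('127.0.0.1', '8080')])
    print(build_port_bindings(['8080:80', '8081:81']))
    # {'80': ['8080'], '81': ['8081']}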
diff --git a/env2/lib/python2.7/site-packages/docker/utils/socket.py b/env2/lib/python2.7/site-packages/docker/utils/socket.py
deleted file mode 100644
index 164b845..0000000
--- a/env2/lib/python2.7/site-packages/docker/utils/socket.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import errno
-import os
-import select
-import struct
-
-import six
-
-try:
-    from ..transport import NpipeSocket
-except ImportError:
-    NpipeSocket = type(None)
-
-
-class SocketError(Exception):
-    pass
-
-
-def read(socket, n=4096):
-    """
-    Reads at most n bytes from socket
-    """
-
-    recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
-
-    # wait for data to become available
-    if not isinstance(socket, NpipeSocket):
-        select.select([socket], [], [])
-
-    try:
-        if hasattr(socket, 'recv'):
-            return socket.recv(n)
-        return os.read(socket.fileno(), n)
-    except EnvironmentError as e:
-        if e.errno not in recoverable_errors:
-            raise
-
-
-def read_exactly(socket, n):
-    """
-    Reads exactly n bytes from socket
-    Raises SocketError if there isn't enough data
-    """
-    data = six.binary_type()
-    while len(data) < n:
-        next_data = read(socket, n - len(data))
-        if not next_data:
-            raise SocketError("Unexpected EOF")
-        data += next_data
-    return data
-
-
-def next_frame_size(socket):
-    """
-    Returns the size of the next frame of data waiting to be read from socket,
-    according to the protocol defined here:
-
-    https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container
-    """
-    try:
-        data = read_exactly(socket, 8)
-    except SocketError:
-        return 0
-
-    _, actual = struct.unpack('>BxxxL', data)
-    return actual
-
-
-def frames_iter(socket):
-    """
-    Returns a generator of frames read from socket
-    """
-    n = next_frame_size(socket)
-    while n > 0:
-        yield read(socket, n)
-        n = next_frame_size(socket)
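
The frame format these helpers parse is simple enough to demonstrate without a live daemon; a self-contained sketch of the 8-byte header (stream id, three padding bytes, big-endian payload length) that ``next_frame_size`` unpacks above::

    import struct

    # Build one stdout frame carrying b'hello' (stream id 1, length 5).
    frame = struct.pack('>BxxxL', 1, 5) + b'hello'
    # Same unpack as next_frame_size: ignore the stream id, keep the length.
    _, size = struct.unpack('>BxxxL', frame[:8])
    print(size, frame[8:8 + size])  # 5 b'hello'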
diff --git a/env2/lib/python2.7/site-packages/docker/utils/types.py b/env2/lib/python2.7/site-packages/docker/utils/types.py
deleted file mode 100644
index 8098c47..0000000
--- a/env2/lib/python2.7/site-packages/docker/utils/types.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Compatibility module. See https://github.com/docker/docker-py/issues/1196
-
-import warnings
-
-from ..types import Ulimit, LogConfig  # flake8: noqa
-
-warnings.warn('docker.utils.types is now docker.types', ImportWarning)
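
With docker-py 1.10.x installed, the shim's warning can be surfaced like this (ImportWarning is ignored by default, and the module body only runs on first import, so this assumes a fresh interpreter)::

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        import docker.utils.types  # noqa: F401
    print(caught[0].message)  # docker.utils.types is now docker.types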
diff --git a/env2/lib/python2.7/site-packages/docker/utils/utils.py b/env2/lib/python2.7/site-packages/docker/utils/utils.py
deleted file mode 100644
index 8d55b57..0000000
--- a/env2/lib/python2.7/site-packages/docker/utils/utils.py
+++ /dev/null
@@ -1,1139 +0,0 @@
-import base64
-import io
-import os
-import os.path
-import json
-import shlex
-import tarfile
-import tempfile
-import warnings
-from distutils.version import StrictVersion
-from datetime import datetime
-from fnmatch import fnmatch
-
-import requests
-import six
-
-from .. import constants
-from .. import errors
-from .. import tls
-from ..types import Ulimit, LogConfig
-
-if six.PY2:
-    from urllib import splitnport
-else:
-    from urllib.parse import splitnport
-
-DEFAULT_HTTP_HOST = "127.0.0.1"
-DEFAULT_UNIX_SOCKET = "http+unix://var/run/docker.sock"
-DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
-
-BYTE_UNITS = {
-    'b': 1,
-    'k': 1024,
-    'm': 1024 * 1024,
-    'g': 1024 * 1024 * 1024
-}
-
-
-def create_ipam_pool(subnet=None, iprange=None, gateway=None,
-                     aux_addresses=None):
-    return {
-        'Subnet': subnet,
-        'IPRange': iprange,
-        'Gateway': gateway,
-        'AuxiliaryAddresses': aux_addresses
-    }
-
-
-def create_ipam_config(driver='default', pool_configs=None):
-    return {
-        'Driver': driver,
-        'Config': pool_configs or []
-    }
-
-
-def mkbuildcontext(dockerfile):
-    f = tempfile.NamedTemporaryFile()
-    t = tarfile.open(mode='w', fileobj=f)
-    if isinstance(dockerfile, io.StringIO):
-        dfinfo = tarfile.TarInfo('Dockerfile')
-        if six.PY3:
-            raise TypeError('Please use io.BytesIO to create in-memory '
-                            'Dockerfiles with Python 3')
-        else:
-            dfinfo.size = len(dockerfile.getvalue())
-            dockerfile.seek(0)
-    elif isinstance(dockerfile, io.BytesIO):
-        dfinfo = tarfile.TarInfo('Dockerfile')
-        dfinfo.size = len(dockerfile.getvalue())
-        dockerfile.seek(0)
-    else:
-        dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
-    t.addfile(dfinfo, dockerfile)
-    t.close()
-    f.seek(0)
-    return f
-
-
-def decode_json_header(header):
-    data = base64.b64decode(header)
-    if six.PY3:
-        data = data.decode('utf-8')
-    return json.loads(data)
-
-
-def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
-    if not fileobj:
-        fileobj = tempfile.NamedTemporaryFile()
-    t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
-
-    root = os.path.abspath(path)
-    exclude = exclude or []
-
-    for path in sorted(exclude_paths(root, exclude, dockerfile=dockerfile)):
-        t.add(os.path.join(root, path), arcname=path, recursive=False)
-
-    t.close()
-    fileobj.seek(0)
-    return fileobj
-
-
-def exclude_paths(root, patterns, dockerfile=None):
-    """
-    Given a root directory path and a list of .dockerignore patterns, return
-    an iterator of all paths (both regular files and directories) in the root
-    directory that do *not* match any of the patterns.
-
-    All paths returned are relative to the root.
-    """
-    if dockerfile is None:
-        dockerfile = 'Dockerfile'
-
-    exceptions = [p for p in patterns if p.startswith('!')]
-
-    include_patterns = [p[1:] for p in exceptions]
-    include_patterns += [dockerfile, '.dockerignore']
-
-    exclude_patterns = list(set(patterns) - set(exceptions))
-
-    paths = get_paths(root, exclude_patterns, include_patterns,
-                      has_exceptions=len(exceptions) > 0)
-
-    return set(paths).union(
-        # If the Dockerfile is in a subdirectory that is excluded, get_paths
-        # will not descend into it and the file will be skipped. This ensures
-        # it doesn't happen.
-        set([dockerfile])
-        if os.path.exists(os.path.join(root, dockerfile)) else set()
-    )
-
-
-def should_include(path, exclude_patterns, include_patterns):
-    """
-    Given a path, a list of exclude patterns, and a list of inclusion patterns:
-
-    1. Returns True if the path doesn't match any exclusion pattern
-    2. Returns False if the path matches an exclusion pattern and doesn't match
-       an inclusion pattern
-    3. Returns True if the path matches an exclusion pattern and matches an
-       inclusion pattern
-    """
-    for pattern in exclude_patterns:
-        if match_path(path, pattern):
-            for pattern in include_patterns:
-                if match_path(path, pattern):
-                    return True
-            return False
-    return True
-
-
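
A pure-Python sketch of the three docstring rules above, inlining ``fnmatch`` in place of the module's ``match_path`` helper (simplified; the real matching is path-component aware)::

    from fnmatch import fnmatch

    def should_include(path, exclude_patterns, include_patterns):
        for pattern in exclude_patterns:
            if fnmatch(path, pattern):
                # Excluded unless an inclusion ("!") pattern re-admits it.
                return any(fnmatch(path, p) for p in include_patterns)
        return True

    print(should_include('app.py', ['*.pyc'], []))              # True  (rule 1)
    print(should_include('app.pyc', ['*.pyc'], []))             # False (rule 2)
    print(should_include('keep.pyc', ['*.pyc'], ['keep.pyc']))  # True  (rule 3)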
-def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):
-    paths = []
-
-    for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):
-        parent = os.path.relpath(parent, root)
-        if parent == '.':
-            parent = ''
-
-        # If exception rules exist, we can't skip recursing into ignored
-        # directories, as we need to look for exceptions in them.
-        #
-        # It may be possible to optimize this further for exception patterns
-        # that *couldn't* match within ignored directories.
-        #
-        # This matches the current docker logic (as of 2015-11-24):
-        # https://github.com/docker/docker/blob/37ba67bf636b34dc5c0c0265d62a089d0492088f/pkg/archive/archive.go#L555-L557
-
-        if not has_exceptions:
-
-            # Remove excluded patterns from the list of directories to traverse
-            # by mutating the dirs we're iterating over.
-            # This looks strange, but is considered the correct way to skip
-            # traversal. See https://docs.python.org/2/library/os.html#os.walk
-
-            dirs[:] = [d for d in dirs if
-                       should_include(os.path.join(parent, d),
-                                      exclude_patterns, include_patterns)]
-
-        for path in dirs:
-            if should_include(os.path.join(parent, path),
-                              exclude_patterns, include_patterns):
-                paths.append(os.path.join(parent, path))
-
-        for path in files:
-            if should_include(os.path.join(parent, path),
-                              exclude_patterns, include_patterns):
-                paths.append(os.path.join(parent, path))
-
-    return paths
-
-
-def match_path(path, pattern):
-    pattern = pattern.rstrip('/')
-    if pattern:
-        pattern = os.path.relpath(pattern)
-
-    pattern_components = pattern.split(os.path.sep)
-    path_components = path.split(os.path.sep)[:len(pattern_components)]
-    return fnmatch('/'.join(path_components), pattern)
-
-
-def compare_version(v1, v2):
-    """Compare docker versions
-
-    >>> v1 = '1.9'
-    >>> v2 = '1.10'
-    >>> compare_version(v1, v2)
-    1
-    >>> compare_version(v2, v1)
-    -1
-    >>> compare_version(v2, v2)
-    0
-    """
-    s1 = StrictVersion(v1)
-    s2 = StrictVersion(v2)
-    if s1 == s2:
-        return 0
-    elif s1 > s2:
-        return -1
-    else:
-        return 1
-
-
-def version_lt(v1, v2):
-    return compare_version(v1, v2) > 0
-
-
-def version_gte(v1, v2):
-    return not version_lt(v1, v2)
-
-
-def ping_registry(url):
-    warnings.warn(
-        'The `ping_registry` method is deprecated and will be removed.',
-        DeprecationWarning
-    )
-
-    return ping(url + '/v2/', [401]) or ping(url + '/v1/_ping')
-
-
-def ping(url, valid_4xx_statuses=None):
-    try:
-        res = requests.get(url, timeout=3)
-    except Exception:
-        return False
-    else:
-        # We don't send auth headers yet,
-        # and a v2 registry will respond with status 401
-        return (
-            res.status_code < 400 or
-            (valid_4xx_statuses and res.status_code in valid_4xx_statuses)
-        )
-
-
-def _convert_port_binding(binding):
-    result = {'HostIp': '', 'HostPort': ''}
-    if isinstance(binding, tuple):
-        if len(binding) == 2:
-            result['HostPort'] = binding[1]
-            result['HostIp'] = binding[0]
-        elif isinstance(binding[0], six.string_types):
-            result['HostIp'] = binding[0]
-        else:
-            result['HostPort'] = binding[0]
-    elif isinstance(binding, dict):
-        if 'HostPort' in binding:
-            result['HostPort'] = binding['HostPort']
-            if 'HostIp' in binding:
-                result['HostIp'] = binding['HostIp']
-        else:
-            raise ValueError(binding)
-    else:
-        result['HostPort'] = binding
-
-    if result['HostPort'] is None:
-        result['HostPort'] = ''
-    else:
-        result['HostPort'] = str(result['HostPort'])
-
-    return result
-
-
-def convert_port_bindings(port_bindings):
-    result = {}
-    for k, v in six.iteritems(port_bindings):
-        key = str(k)
-        if '/' not in key:
-            key += '/tcp'
-        if isinstance(v, list):
-            result[key] = [_convert_port_binding(binding) for binding in v]
-        else:
-            result[key] = [_convert_port_binding(v)]
-    return result
-
-
-def convert_volume_binds(binds):
-    if isinstance(binds, list):
-        return binds
-
-    result = []
-    for k, v in binds.items():
-        if isinstance(k, six.binary_type):
-            k = k.decode('utf-8')
-
-        if isinstance(v, dict):
-            if 'ro' in v and 'mode' in v:
-                raise ValueError(
-                    'Binding cannot contain both "ro" and "mode": {}'
-                    .format(repr(v))
-                )
-
-            bind = v['bind']
-            if isinstance(bind, six.binary_type):
-                bind = bind.decode('utf-8')
-
-            if 'ro' in v:
-                mode = 'ro' if v['ro'] else 'rw'
-            elif 'mode' in v:
-                mode = v['mode']
-            else:
-                mode = 'rw'
-
-            result.append(
-                six.text_type('{0}:{1}:{2}').format(k, bind, mode)
-            )
-        else:
-            if isinstance(v, six.binary_type):
-                v = v.decode('utf-8')
-            result.append(
-                six.text_type('{0}:{1}:rw').format(k, v)
-            )
-    return result
-
-
-def convert_tmpfs_mounts(tmpfs):
-    if isinstance(tmpfs, dict):
-        return tmpfs
-
-    if not isinstance(tmpfs, list):
-        raise ValueError(
-            'Expected tmpfs value to be either a list or a dict, found: {}'
-            .format(type(tmpfs).__name__)
-        )
-
-    result = {}
-    for mount in tmpfs:
-        if isinstance(mount, six.string_types):
-            if ":" in mount:
-                name, options = mount.split(":", 1)
-            else:
-                name = mount
-                options = ""
-
-        else:
-            raise ValueError(
-                "Expected item in tmpfs list to be a string, found: {}"
-                .format(type(mount).__name__)
-            )
-
-        result[name] = options
-    return result
-
-
-def parse_repository_tag(repo_name):
-    parts = repo_name.rsplit('@', 1)
-    if len(parts) == 2:
-        return tuple(parts)
-    parts = repo_name.rsplit(':', 1)
-    if len(parts) == 2 and '/' not in parts[1]:
-        return tuple(parts)
-    return repo_name, None
-
-
-# Based on utils.go:ParseHost http://tinyurl.com/nkahcfh
-# fd:// protocol unsupported (for obvious reasons)
-# Added support for http and https
-# Protocol translation: tcp -> http, unix -> http+unix
-def parse_host(addr, is_win32=False, tls=False):
-    proto = "http+unix"
-    port = None
-    path = ''
-
-    if not addr and is_win32:
-        addr = DEFAULT_NPIPE
-
-    if not addr or addr.strip() == 'unix://':
-        return DEFAULT_UNIX_SOCKET
-
-    addr = addr.strip()
-    if addr.startswith('http://'):
-        addr = addr.replace('http://', 'tcp://')
-    if addr.startswith('http+unix://'):
-        addr = addr.replace('http+unix://', 'unix://')
-
-    if addr == 'tcp://':
-        raise errors.DockerException(
-            "Invalid bind address format: {0}".format(addr)
-        )
-    elif addr.startswith('unix://'):
-        addr = addr[7:]
-    elif addr.startswith('tcp://'):
-        proto = 'http{0}'.format('s' if tls else '')
-        addr = addr[6:]
-    elif addr.startswith('https://'):
-        proto = "https"
-        addr = addr[8:]
-    elif addr.startswith('npipe://'):
-        proto = 'npipe'
-        addr = addr[8:]
-    elif addr.startswith('fd://'):
-        raise errors.DockerException("fd protocol is not implemented")
-    else:
-        if "://" in addr:
-            raise errors.DockerException(
-                "Invalid bind address protocol: {0}".format(addr)
-            )
-        proto = "https" if tls else "http"
-
-    if proto in ("http", "https"):
-        address_parts = addr.split('/', 1)
-        host = address_parts[0]
-        if len(address_parts) == 2:
-            path = '/' + address_parts[1]
-        host, port = splitnport(host)
-
-        if port is None:
-            raise errors.DockerException(
-                "Invalid port: {0}".format(addr)
-            )
-
-        if not host:
-            host = DEFAULT_HTTP_HOST
-    else:
-        host = addr
-
-    if proto in ("http", "https") and port == -1:
-        raise errors.DockerException(
-            "Bind address needs a port: {0}".format(addr))
-
-    if proto == "http+unix" or proto == 'npipe':
-        return "{0}://{1}".format(proto, host).rstrip('/')
-    return "{0}://{1}:{2}{3}".format(proto, host, port, path).rstrip('/')
-
-
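Assuming docker-py 1.10.x is importable, the address normalisation above behaves like this::

    from docker.utils import parse_host

    print(parse_host(''))                                # http+unix://var/run/docker.sock
    print(parse_host('tcp://127.0.0.1:2375'))            # http://127.0.0.1:2375
    print(parse_host('tcp://127.0.0.1:2376', tls=True))  # https://127.0.0.1:2376
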
-def parse_devices(devices):
-    device_list = []
-    for device in devices:
-        if isinstance(device, dict):
-            device_list.append(device)
-            continue
-        if not isinstance(device, six.string_types):
-            raise errors.DockerException(
-                'Invalid device type {0}'.format(type(device))
-            )
-        device_mapping = device.split(':')
-        if device_mapping:
-            path_on_host = device_mapping[0]
-            if len(device_mapping) > 1:
-                path_in_container = device_mapping[1]
-            else:
-                path_in_container = path_on_host
-            if len(device_mapping) > 2:
-                permissions = device_mapping[2]
-            else:
-                permissions = 'rwm'
-            device_list.append({
-                'PathOnHost': path_on_host,
-                'PathInContainer': path_in_container,
-                'CgroupPermissions': permissions
-            })
-    return device_list
-
-
-def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
-    if not environment:
-        environment = os.environ
-    host = environment.get('DOCKER_HOST')
-
-    # empty string for cert path is the same as unset.
-    cert_path = environment.get('DOCKER_CERT_PATH') or None
-
-    # empty string for tls verify counts as "false".
-    # Any other value counts as true; unset counts as false.
-    tls_verify = environment.get('DOCKER_TLS_VERIFY')
-    if tls_verify == '':
-        tls_verify = False
-    else:
-        tls_verify = tls_verify is not None
-    enable_tls = cert_path or tls_verify
-
-    params = {}
-
-    if host:
-        params['base_url'] = (
-            host.replace('tcp://', 'https://') if enable_tls else host
-        )
-
-    if not enable_tls:
-        return params
-
-    if not cert_path:
-        cert_path = os.path.join(os.path.expanduser('~'), '.docker')
-
-    if not tls_verify and assert_hostname is None:
-        # assert_hostname is a subset of TLS verification,
-        # so if it's not set already then set it to false.
-        assert_hostname = False
-
-    params['tls'] = tls.TLSConfig(
-        client_cert=(os.path.join(cert_path, 'cert.pem'),
-                     os.path.join(cert_path, 'key.pem')),
-        ca_cert=os.path.join(cert_path, 'ca.pem'),
-        verify=tls_verify,
-        ssl_version=ssl_version,
-        assert_hostname=assert_hostname,
-    )
-
-    return params
-
-
-def convert_filters(filters):
-    result = {}
-    for k, v in six.iteritems(filters):
-        if isinstance(v, bool):
-            v = 'true' if v else 'false'
-        if not isinstance(v, list):
-            v = [v, ]
-        result[k] = v
-    return json.dumps(result)
-
-
-def datetime_to_timestamp(dt):
-    """Convert a UTC datetime to a Unix timestamp"""
-    delta = dt - datetime.utcfromtimestamp(0)
-    return delta.seconds + delta.days * 24 * 3600
-
-
-def parse_bytes(s):
-    if isinstance(s, six.integer_types + (float,)):
-        return s
-    if len(s) == 0:
-        return 0
-
-    if s[-2:-1].isalpha() and s[-1].isalpha():
-        if s[-1] == "b" or s[-1] == "B":
-            s = s[:-1]
-    units = BYTE_UNITS
-    suffix = s[-1].lower()
-
-    # Check if the variable is a string representation of an int
-    # without a units part; if so, assume the unit is bytes.
-    if suffix.isdigit():
-        digits_part = s
-        suffix = 'b'
-    else:
-        digits_part = s[:-1]
-
-    if suffix in units.keys() or suffix.isdigit():
-        try:
-            digits = int(digits_part)
-        except ValueError:
-            raise errors.DockerException(
-                'Failed converting the string value for memory ({0}) to'
-                ' an integer.'.format(digits_part)
-            )
-
-        # Convert back to an integer for the final result
-        s = int(digits * units[suffix])
-    else:
-        raise errors.DockerException(
-            'The specified value for memory ({0}) should specify the'
-            ' units. The postfix should be one of the `b` `k` `m` `g`'
-            ' characters'.format(s)
-        )
-
-    return s
-
-
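Under the same assumption, ``parse_bytes`` accepts a bare integer string or a b/k/m/g suffix (powers of 1024)::

    from docker.utils import parse_bytes

    print(parse_bytes('512'))   # 512
    print(parse_bytes('1k'))    # 1024
    print(parse_bytes('128m'))  # 134217728
    print(parse_bytes('1g'))    # 1073741824
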
-def host_config_type_error(param, param_value, expected):
-    error_msg = 'Invalid type for {0} param: expected {1} but found {2}'
-    return TypeError(error_msg.format(param, expected, type(param_value)))
-
-
-def host_config_version_error(param, version, less_than=True):
-    operator = '<' if less_than else '>'
-    error_msg = '{0} param is not supported in API versions {1} {2}'
-    return errors.InvalidVersion(error_msg.format(param, operator, version))
-
-
-def host_config_value_error(param, param_value):
-    error_msg = 'Invalid value for {0} param: {1}'
-    return ValueError(error_msg.format(param, param_value))
-
-
-def create_host_config(binds=None, port_bindings=None, lxc_conf=None,
-                       publish_all_ports=False, links=None, privileged=False,
-                       dns=None, dns_search=None, volumes_from=None,
-                       network_mode=None, restart_policy=None, cap_add=None,
-                       cap_drop=None, devices=None, extra_hosts=None,
-                       read_only=None, pid_mode=None, ipc_mode=None,
-                       security_opt=None, ulimits=None, log_config=None,
-                       mem_limit=None, memswap_limit=None,
-                       mem_reservation=None, kernel_memory=None,
-                       mem_swappiness=None, cgroup_parent=None,
-                       group_add=None, cpu_quota=None,
-                       cpu_period=None, blkio_weight=None,
-                       blkio_weight_device=None, device_read_bps=None,
-                       device_write_bps=None, device_read_iops=None,
-                       device_write_iops=None, oom_kill_disable=False,
-                       shm_size=None, sysctls=None, version=None, tmpfs=None,
-                       oom_score_adj=None, dns_opt=None, cpu_shares=None,
-                       cpuset_cpus=None, userns_mode=None, pids_limit=None):
-
-    host_config = {}
-
-    if not version:
-        warnings.warn(
-            'docker.utils.create_host_config() is deprecated. Please use '
-            'Client.create_host_config() instead.'
-        )
-        version = constants.DEFAULT_DOCKER_API_VERSION
-
-    if mem_limit is not None:
-        host_config['Memory'] = parse_bytes(mem_limit)
-
-    if memswap_limit is not None:
-        host_config['MemorySwap'] = parse_bytes(memswap_limit)
-
-    if mem_reservation:
-        if version_lt(version, '1.21'):
-            raise host_config_version_error('mem_reservation', '1.21')
-
-        host_config['MemoryReservation'] = parse_bytes(mem_reservation)
-
-    if kernel_memory:
-        if version_lt(version, '1.21'):
-            raise host_config_version_error('kernel_memory', '1.21')
-
-        host_config['KernelMemory'] = parse_bytes(kernel_memory)
-
-    if mem_swappiness is not None:
-        if version_lt(version, '1.20'):
-            raise host_config_version_error('mem_swappiness', '1.20')
-        if not isinstance(mem_swappiness, int):
-            raise host_config_type_error(
-                'mem_swappiness', mem_swappiness, 'int'
-            )
-
-        host_config['MemorySwappiness'] = mem_swappiness
-
-    if shm_size is not None:
-        if isinstance(shm_size, six.string_types):
-            shm_size = parse_bytes(shm_size)
-
-        host_config['ShmSize'] = shm_size
-
-    if pid_mode not in (None, 'host'):
-        raise host_config_value_error('pid_mode', pid_mode)
-    elif pid_mode:
-        host_config['PidMode'] = pid_mode
-
-    if ipc_mode:
-        host_config['IpcMode'] = ipc_mode
-
-    if privileged:
-        host_config['Privileged'] = privileged
-
-    if oom_kill_disable:
-        if version_lt(version, '1.20'):
-            raise host_config_version_error('oom_kill_disable', '1.20')
-
-        host_config['OomKillDisable'] = oom_kill_disable
-
-    if oom_score_adj:
-        if version_lt(version, '1.22'):
-            raise host_config_version_error('oom_score_adj', '1.22')
-        if not isinstance(oom_score_adj, int):
-            raise host_config_type_error(
-                'oom_score_adj', oom_score_adj, 'int'
-            )
-        host_config['OomScoreAdj'] = oom_score_adj
-
-    if publish_all_ports:
-        host_config['PublishAllPorts'] = publish_all_ports
-
-    if read_only is not None:
-        host_config['ReadonlyRootfs'] = read_only
-
-    if dns_search:
-        host_config['DnsSearch'] = dns_search
-
-    if network_mode:
-        host_config['NetworkMode'] = network_mode
-    elif network_mode is None and compare_version('1.19', version) > 0:
-        host_config['NetworkMode'] = 'default'
-
-    if restart_policy:
-        if not isinstance(restart_policy, dict):
-            raise host_config_type_error(
-                'restart_policy', restart_policy, 'dict'
-            )
-
-        host_config['RestartPolicy'] = restart_policy
-
-    if cap_add:
-        host_config['CapAdd'] = cap_add
-
-    if cap_drop:
-        host_config['CapDrop'] = cap_drop
-
-    if devices:
-        host_config['Devices'] = parse_devices(devices)
-
-    if group_add:
-        if version_lt(version, '1.20'):
-            raise host_config_version_error('group_add', '1.20')
-
-        host_config['GroupAdd'] = [six.text_type(grp) for grp in group_add]
-
-    if dns is not None:
-        host_config['Dns'] = dns
-
-    if dns_opt is not None:
-        if version_lt(version, '1.21'):
-            raise host_config_version_error('dns_opt', '1.21')
-
-        host_config['DnsOptions'] = dns_opt
-
-    if security_opt is not None:
-        if not isinstance(security_opt, list):
-            raise host_config_type_error('security_opt', security_opt, 'list')
-
-        host_config['SecurityOpt'] = security_opt
-
-    if sysctls:
-        if not isinstance(sysctls, dict):
-            raise host_config_type_error('sysctls', sysctls, 'dict')
-        host_config['Sysctls'] = {}
-        for k, v in six.iteritems(sysctls):
-            host_config['Sysctls'][k] = six.text_type(v)
-
-    if volumes_from is not None:
-        if isinstance(volumes_from, six.string_types):
-            volumes_from = volumes_from.split(',')
-
-        host_config['VolumesFrom'] = volumes_from
-
-    if binds is not None:
-        host_config['Binds'] = convert_volume_binds(binds)
-
-    if port_bindings is not None:
-        host_config['PortBindings'] = convert_port_bindings(port_bindings)
-
-    if extra_hosts is not None:
-        if isinstance(extra_hosts, dict):
-            extra_hosts = [
-                '{0}:{1}'.format(k, v)
-                for k, v in sorted(six.iteritems(extra_hosts))
-            ]
-
-        host_config['ExtraHosts'] = extra_hosts
-
-    if links is not None:
-        host_config['Links'] = normalize_links(links)
-
-    if isinstance(lxc_conf, dict):
-        formatted = []
-        for k, v in six.iteritems(lxc_conf):
-            formatted.append({'Key': k, 'Value': str(v)})
-        lxc_conf = formatted
-
-    if lxc_conf is not None:
-        host_config['LxcConf'] = lxc_conf
-
-    if cgroup_parent is not None:
-        host_config['CgroupParent'] = cgroup_parent
-
-    if ulimits is not None:
-        if not isinstance(ulimits, list):
-            raise host_config_type_error('ulimits', ulimits, 'list')
-        host_config['Ulimits'] = []
-        for l in ulimits:
-            if not isinstance(l, Ulimit):
-                l = Ulimit(**l)
-            host_config['Ulimits'].append(l)
-
-    if log_config is not None:
-        if not isinstance(log_config, LogConfig):
-            if not isinstance(log_config, dict):
-                raise host_config_type_error(
-                    'log_config', log_config, 'LogConfig'
-                )
-            log_config = LogConfig(**log_config)
-
-        host_config['LogConfig'] = log_config
-
-    if cpu_quota:
-        if not isinstance(cpu_quota, int):
-            raise host_config_type_error('cpu_quota', cpu_quota, 'int')
-        if version_lt(version, '1.19'):
-            raise host_config_version_error('cpu_quota', '1.19')
-
-        host_config['CpuQuota'] = cpu_quota
-
-    if cpu_period:
-        if not isinstance(cpu_period, int):
-            raise host_config_type_error('cpu_period', cpu_period, 'int')
-        if version_lt(version, '1.19'):
-            raise host_config_version_error('cpu_period', '1.19')
-
-        host_config['CpuPeriod'] = cpu_period
-
-    if cpu_shares:
-        if version_lt(version, '1.18'):
-            raise host_config_version_error('cpu_shares', '1.18')
-
-        if not isinstance(cpu_shares, int):
-            raise host_config_type_error('cpu_shares', cpu_shares, 'int')
-
-        host_config['CpuShares'] = cpu_shares
-
-    if cpuset_cpus:
-        if version_lt(version, '1.18'):
-            raise host_config_version_error('cpuset_cpus', '1.18')
-
-        host_config['CpuSetCpus'] = cpuset_cpus
-
-    if blkio_weight:
-        if not isinstance(blkio_weight, int):
-            raise host_config_type_error('blkio_weight', blkio_weight, 'int')
-        if version_lt(version, '1.22'):
-            raise host_config_version_error('blkio_weight', '1.22')
-        host_config["BlkioWeight"] = blkio_weight
-
-    if blkio_weight_device:
-        if not isinstance(blkio_weight_device, list):
-            raise host_config_type_error(
-                'blkio_weight_device', blkio_weight_device, 'list'
-            )
-        if version_lt(version, '1.22'):
-            raise host_config_version_error('blkio_weight_device', '1.22')
-        host_config["BlkioWeightDevice"] = blkio_weight_device
-
-    if device_read_bps:
-        if not isinstance(device_read_bps, list):
-            raise host_config_type_error(
-                'device_read_bps', device_read_bps, 'list'
-            )
-        if version_lt(version, '1.22'):
-            raise host_config_version_error('device_read_bps', '1.22')
-        host_config["BlkioDeviceReadBps"] = device_read_bps
-
-    if device_write_bps:
-        if not isinstance(device_write_bps, list):
-            raise host_config_type_error(
-                'device_write_bps', device_write_bps, 'list'
-            )
-        if version_lt(version, '1.22'):
-            raise host_config_version_error('device_write_bps', '1.22')
-        host_config["BlkioDeviceWriteBps"] = device_write_bps
-
-    if device_read_iops:
-        if not isinstance(device_read_iops, list):
-            raise host_config_type_error(
-                'device_read_iops', device_read_iops, 'list'
-            )
-        if version_lt(version, '1.22'):
-            raise host_config_version_error('device_read_iops', '1.22')
-        host_config["BlkioDeviceReadIOps"] = device_read_iops
-
-    if device_write_iops:
-        if not isinstance(device_write_iops, list):
-            raise host_config_type_error(
-                'device_write_iops', device_write_iops, 'list'
-            )
-        if version_lt(version, '1.22'):
-            raise host_config_version_error('device_write_iops', '1.22')
-        host_config["BlkioDeviceWriteIOps"] = device_write_iops
-
-    if tmpfs:
-        if version_lt(version, '1.22'):
-            raise host_config_version_error('tmpfs', '1.22')
-        host_config["Tmpfs"] = convert_tmpfs_mounts(tmpfs)
-
-    if userns_mode:
-        if version_lt(version, '1.23'):
-            raise host_config_version_error('userns_mode', '1.23')
-
-        if userns_mode != "host":
-            raise host_config_value_error("userns_mode", userns_mode)
-        host_config['UsernsMode'] = userns_mode
-
-    if pids_limit:
-        if not isinstance(pids_limit, int):
-            raise host_config_type_error('pids_limit', pids_limit, 'int')
-        if version_lt(version, '1.23'):
-            raise host_config_version_error('pids_limit', '1.23')
-        host_config["PidsLimit"] = pids_limit
-
-    return host_config
-
-
-def normalize_links(links):
-    if isinstance(links, dict):
-        links = six.iteritems(links)
-
-    return ['{0}:{1}'.format(k, v) for k, v in sorted(links)]
-
-
-def create_networking_config(endpoints_config=None):
-    networking_config = {}
-
-    if endpoints_config:
-        networking_config["EndpointsConfig"] = endpoints_config
-
-    return networking_config
-
-
-def create_endpoint_config(version, aliases=None, links=None,
-                           ipv4_address=None, ipv6_address=None,
-                           link_local_ips=None):
-    if version_lt(version, '1.22'):
-        raise errors.InvalidVersion(
-            'Endpoint config is not supported for API version < 1.22'
-        )
-    endpoint_config = {}
-
-    if aliases:
-        endpoint_config["Aliases"] = aliases
-
-    if links:
-        endpoint_config["Links"] = normalize_links(links)
-
-    ipam_config = {}
-    if ipv4_address:
-        ipam_config['IPv4Address'] = ipv4_address
-
-    if ipv6_address:
-        ipam_config['IPv6Address'] = ipv6_address
-
-    if link_local_ips is not None:
-        if version_lt(version, '1.24'):
-            raise errors.InvalidVersion(
-                'link_local_ips is not supported for API version < 1.24'
-            )
-        ipam_config['LinkLocalIPs'] = link_local_ips
-
-    if ipam_config:
-        endpoint_config['IPAMConfig'] = ipam_config
-
-    return endpoint_config
-
-
-def parse_env_file(env_file):
-    """
-    Reads a line-separated environment file.
-    The format of each line should be "key=value".
-    """
-    environment = {}
-
-    with open(env_file, 'r') as f:
-        for line in f:
-
-            if line[0] == '#':
-                continue
-
-            parse_line = line.strip().split('=', 1)
-            if len(parse_line) == 2:
-                k, v = parse_line
-                environment[k] = v
-            else:
-                raise errors.DockerException(
-                    'Invalid line in environment file {0}:\n{1}'.format(
-                        env_file, line))
-
-    return environment
-
-
-def split_command(command):
-    if six.PY2 and not isinstance(command, six.binary_type):
-        command = command.encode('utf-8')
-    return shlex.split(command)
-
-
-def format_environment(environment):
-    def format_env(key, value):
-        if value is None:
-            return key
-        if isinstance(value, six.binary_type):
-            value = value.decode('utf-8')
-
-        return u'{key}={value}'.format(key=key, value=value)
-    return [format_env(*var) for var in six.iteritems(environment)]
-
-
-def create_container_config(
-    version, image, command, hostname=None, user=None, detach=False,
-    stdin_open=False, tty=False, mem_limit=None, ports=None, environment=None,
-    dns=None, volumes=None, volumes_from=None, network_disabled=False,
-    entrypoint=None, cpu_shares=None, working_dir=None, domainname=None,
-    memswap_limit=None, cpuset=None, host_config=None, mac_address=None,
-    labels=None, volume_driver=None, stop_signal=None, networking_config=None,
-):
-    if isinstance(command, six.string_types):
-        command = split_command(command)
-
-    if isinstance(entrypoint, six.string_types):
-        entrypoint = split_command(entrypoint)
-
-    if isinstance(environment, dict):
-        environment = format_environment(environment)
-
-    if labels is not None and compare_version('1.18', version) < 0:
-        raise errors.InvalidVersion(
-            'labels were only introduced in API version 1.18'
-        )
-
-    if cpuset is not None or cpu_shares is not None:
-        if version_gte(version, '1.18'):
-            warnings.warn(
-                'The cpuset_cpus and cpu_shares options have been moved to '
-                'host_config in API version 1.18, and will be removed',
-                DeprecationWarning
-            )
-
-    if stop_signal is not None and compare_version('1.21', version) < 0:
-        raise errors.InvalidVersion(
-            'stop_signal was only introduced in API version 1.21'
-        )
-
-    if compare_version('1.19', version) < 0:
-        if volume_driver is not None:
-            raise errors.InvalidVersion(
-                'Volume drivers were only introduced in API version 1.19'
-            )
-        mem_limit = mem_limit if mem_limit is not None else 0
-        memswap_limit = memswap_limit if memswap_limit is not None else 0
-    else:
-        if mem_limit is not None:
-            raise errors.InvalidVersion(
-                'mem_limit has been moved to host_config in API version 1.19'
-            )
-
-        if memswap_limit is not None:
-            raise errors.InvalidVersion(
-                'memswap_limit has been moved to host_config in API '
-                'version 1.19'
-            )
-
-    if isinstance(labels, list):
-        labels = dict((lbl, six.text_type('')) for lbl in labels)
-
-    if mem_limit is not None:
-        mem_limit = parse_bytes(mem_limit)
-
-    if memswap_limit is not None:
-        memswap_limit = parse_bytes(memswap_limit)
-
-    if isinstance(ports, list):
-        exposed_ports = {}
-        for port_definition in ports:
-            port = port_definition
-            proto = 'tcp'
-            if isinstance(port_definition, tuple):
-                if len(port_definition) == 2:
-                    proto = port_definition[1]
-                port = port_definition[0]
-            exposed_ports['{0}/{1}'.format(port, proto)] = {}
-        ports = exposed_ports
-
-    if isinstance(volumes, six.string_types):
-        volumes = [volumes, ]
-
-    if isinstance(volumes, list):
-        volumes_dict = {}
-        for vol in volumes:
-            volumes_dict[vol] = {}
-        volumes = volumes_dict
-
-    if volumes_from:
-        if not isinstance(volumes_from, six.string_types):
-            volumes_from = ','.join(volumes_from)
-    else:
-        # Force None, an empty list or dict causes client.start to fail
-        volumes_from = None
-
-    attach_stdin = False
-    attach_stdout = False
-    attach_stderr = False
-    stdin_once = False
-
-    if not detach:
-        attach_stdout = True
-        attach_stderr = True
-
-        if stdin_open:
-            attach_stdin = True
-            stdin_once = True
-
-    if compare_version('1.10', version) >= 0:
-        message = ('{0!r} parameter has no effect on create_container().'
-                   ' It has been moved to host_config')
-        if dns is not None:
-            raise errors.InvalidVersion(message.format('dns'))
-        if volumes_from is not None:
-            raise errors.InvalidVersion(message.format('volumes_from'))
-
-    return {
-        'Hostname': hostname,
-        'Domainname': domainname,
-        'ExposedPorts': ports,
-        'User': six.text_type(user) if user else None,
-        'Tty': tty,
-        'OpenStdin': stdin_open,
-        'StdinOnce': stdin_once,
-        'Memory': mem_limit,
-        'AttachStdin': attach_stdin,
-        'AttachStdout': attach_stdout,
-        'AttachStderr': attach_stderr,
-        'Env': environment,
-        'Cmd': command,
-        'Dns': dns,
-        'Image': image,
-        'Volumes': volumes,
-        'VolumesFrom': volumes_from,
-        'NetworkDisabled': network_disabled,
-        'Entrypoint': entrypoint,
-        'CpuShares': cpu_shares,
-        'Cpuset': cpuset,
-        'CpusetCpus': cpuset,
-        'WorkingDir': working_dir,
-        'MemorySwap': memswap_limit,
-        'HostConfig': host_config,
-        'NetworkingConfig': networking_config,
-        'MacAddress': mac_address,
-        'Labels': labels,
-        'VolumeDriver': volume_driver,
-        'StopSignal': stop_signal
-    }
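
A hedged usage sketch of the (deprecated) module-level helper removed above; passing ``version`` explicitly both silences the deprecation warning and pins the API-version checks::

    from docker.utils import create_host_config

    hc = create_host_config(
        version='1.24',
        binds={'/srv/data': {'bind': '/data', 'mode': 'ro'}},
        port_bindings={80: 8080},
        mem_limit='256m',
    )
    print(hc['Memory'])        # 268435456
    print(hc['Binds'])         # ['/srv/data:/data:ro']
    print(hc['PortBindings'])  # {'80/tcp': [{'HostIp': '', 'HostPort': '8080'}]}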
diff --git a/env2/lib/python2.7/site-packages/docker/version.py b/env2/lib/python2.7/site-packages/docker/version.py
deleted file mode 100644
index 27d014c..0000000
--- a/env2/lib/python2.7/site-packages/docker/version.py
+++ /dev/null
@@ -1,2 +0,0 @@
-version = "1.10.6"
-version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/DESCRIPTION.rst b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/DESCRIPTION.rst
deleted file mode 100644
index e118723..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-UNKNOWN
-
-
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/INSTALLER b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/METADATA b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/METADATA
deleted file mode 100644
index d6f1eb7..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/METADATA
+++ /dev/null
@@ -1,32 +0,0 @@
-Metadata-Version: 2.0
-Name: docker-compose
-Version: 1.8.0
-Summary: Multi-container orchestration for Docker
-Home-page: https://www.docker.com/
-Author: Docker, Inc.
-Author-email: UNKNOWN
-License: Apache License 2.0
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Environment :: Console
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.4
-Requires-Dist: cached-property (>=1.2.0,<2)
-Requires-Dist: docopt (<0.7,>=0.6.1)
-Requires-Dist: PyYAML (<4,>=3.10)
-Requires-Dist: requests (>=2.6.1,<2.8)
-Requires-Dist: texttable (>=0.8.1,<0.9)
-Requires-Dist: websocket-client (>=0.32.0,<1.0)
-Requires-Dist: docker-py (>=1.9.0,<2.0)
-Requires-Dist: dockerpty (<0.5,>=0.4.1)
-Requires-Dist: six (>=1.3.0,<2)
-Requires-Dist: jsonschema (>=2.5.1,<3)
-Requires-Dist: enum34 (>=1.0.4,<2)
-
-UNKNOWN
-
-
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/RECORD b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/RECORD
deleted file mode 100644
index 89a4211..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/RECORD
+++ /dev/null
@@ -1,83 +0,0 @@
-compose/GITSHA,sha256=jpASxQL3TVzirAeKn9RVj9Zx-UOyi9_mxHgRPjHpjY4,8
-compose/__init__.py,sha256=rTFtWHw2kt3e2ffnFskQihuIX67673YKGReuu-KrCc4,102
-compose/__main__.py,sha256=s6kkTRY0cnWgIy8ptC8T48d7Z8UHbkSIhLNYKUJYkKY,122
-compose/bundle.py,sha256=533DxcDe3lHcoMbxdnQmK-on7Mi5okjxiKEpPB8Dlks,7036
-compose/const.py,sha256=wp1a0adBcp46kHHufz6rdpoKZlD5vaeOmGlR30p0zdg,792
-compose/container.py,sha256=XMpq6GkAIlzfTIRSh7RupHeQbp7zTYm8IZmXpXH8fmk,7749
-compose/errors.py,sha256=nnCXDJvKbUu9rTY3-s82TezWDuZreBkugYZgi-phgtM,178
-compose/network.py,sha256=wEYZ0EFAFHqEvgMdQCmQebvIj4wJSFh-puEK39oDDZ8,5994
-compose/parallel.py,sha256=wqRPUqeHz2qZHFrPAO9dkTXhgQzg-M7ljbryK0kFC-E,7436
-compose/progress_stream.py,sha256=MmE4M7lOyqHikHSX5yVvpdWJyz2wNa15cDKfi5E3oJk,3019
-compose/project.py,sha256=cBNXNU07k06qZbVhpPSNDQDsLdU0hebpf8h4eALSo7E,19718
-compose/service.py,sha256=f0HYq-nM-A1eqerfrRfEg8qy6aMB7LpuybDC4xvJt30,36427
-compose/state.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-compose/utils.py,sha256=sp9vpF1euhdTqTj5yXqzR0WZgpwfCGPWUjHIUYDqy9s,2691
-compose/volume.py,sha256=lPMmFGwNQu44PATrosKaeK08BgJh7tD9yjcxV-d82IY,4640
-compose/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-compose/cli/colors.py,sha256=xXRSficE5yN-2fAdN_ZtX9MJfizjgwM3PaxBTMCbs98,860
-compose/cli/command.py,sha256=d9XhUrWW9vnXyvlxekENpLTDbnbQfrG9yyUQ51VqShg,4041
-compose/cli/docker_client.py,sha256=nHfyxtgOyA3Kmtw8BtcOHY8R7mVSQNyON_Rj7Wpakzo,2135
-compose/cli/docopt_command.py,sha256=-ZziHSp14mIVfW7dMkO-FYPhwlgM6FYGGZx-cXECaoo,1700
-compose/cli/errors.py,sha256=vSqOqS_t1gU8CuX8OdFNKerI1jNSFcM5iexYKaCg-LY,3915
-compose/cli/formatter.py,sha256=_3KuUmpoJw7guGZixU9E8IMTGY_UWC8yAUP9FRcIRBc,1358
-compose/cli/log_printer.py,sha256=Y4pFpNBaNG_XbF2vTKA8TiT7P_lC1FaOfHKz8OkdDLM,6605
-compose/cli/main.py,sha256=uXo3isrKJiWDLgwDS7X9HJ_FDH4u5DUrmwrN2mVyK5w,37145
-compose/cli/signals.py,sha256=QlwX3FHiqIQA6f7_QCOx4rG93fUimCreurYl3bIR12s,395
-compose/cli/utils.py,sha256=QmBbqZ-m46IaHY7arVA0O46mxAeOFT24KDrjB51p0Cs,3192
-compose/cli/verbose_proxy.py,sha256=SIrY0HAIiUH9HyaRTDd692JHvMQw4kUwt0qSOZ6bCkQ,1770
-compose/config/__init__.py,sha256=nF5USHmyudNrl6SA0QoS_5kNCtkFTSr7vPfyINpxG70,325
-compose/config/config.py,sha256=pSz1RtdYDzIcqcwBLOSuSSfkchJuHe_R2AcATu-tenI,32327
-compose/config/config_schema_v1.json,sha256=y6W8hrnIP_tqp53vvbXdRYCkeZXGzN4p8zJmsnaYJEA,5475
-compose/config/config_schema_v2.0.json,sha256=iDIwdIJvC8rO2dC8IMk23sPu1gA8_3iah2P7adbxGSw,8823
-compose/config/environment.py,sha256=6saRBOppUlDZQd8yF2pFecftWsBlaBAWptKGYSE2Brw,3235
-compose/config/errors.py,sha256=MGVp2LTs8EIOLB9aMTV8E2bO5bVYFwe26YCzCSyzS-U,1432
-compose/config/interpolation.py,sha256=2cXGuIoffLnrvsrtypE6PB3VKhl3B4mHUzGnizaMTZc,1751
-compose/config/serialize.py,sha256=-e5BtcnaAPR_KR187uF44Mxi7ZfBH9f5hCPgrZyX7cw,1646
-compose/config/sort_services.py,sha256=nl2DvRHmO-_9-wrmWRetkrXE-hJO8QWr6Pk8YAUWrPE,2408
-compose/config/types.py,sha256=GrPFhEPIMspCrEIy1YqGW7m0PNRQBFhATJPaxPPTmaA,6215
-compose/config/validation.py,sha256=1-jLkCltUZBxFI4QMviUW4paPnqPXkfZuUbh8QMbxks,14474
-docker_compose-1.8.0.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10
-docker_compose-1.8.0.dist-info/METADATA,sha256=h3Y_WCbTK1PD5qljExeiyEfxj71KFCQ2tPj5O5mEkEg,1065
-docker_compose-1.8.0.dist-info/RECORD,,
-docker_compose-1.8.0.dist-info/WHEEL,sha256=siaGhHNH1IivIQO0_6mMI-08XttA7Qr2K9-kq5rC0zk,96
-docker_compose-1.8.0.dist-info/entry_points.txt,sha256=-8iKBsI1us-81I-rE86q5ZyOIQJKSiP-kBugo6Ml_gk,68
-docker_compose-1.8.0.dist-info/metadata.json,sha256=Cmku150e5kFzv2CDdEBxcdE39A071_pUXgLTdwgDZ6U,1314
-docker_compose-1.8.0.dist-info/pbr.json,sha256=VCVFzZPRQpukwD9OMPEIOF3mBeIEbAh2TrNve5nbG0U,47
-docker_compose-1.8.0.dist-info/top_level.txt,sha256=jLmhBR06QFGLqfBVdNJ3JPy-7jr7nSl7LgGemw-WiZw,8
-../../../bin/docker-compose,sha256=7DEkKhbx46PlzrtxonKKwF-r4Mjrc6I8sPKJ50pjxWY,287
-docker_compose-1.8.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-compose/network.pyc,,
-compose/utils.pyc,,
-compose/cli/signals.pyc,,
-compose/config/types.pyc,,
-compose/const.pyc,,
-compose/state.pyc,,
-compose/service.pyc,,
-compose/config/config.pyc,,
-compose/cli/formatter.pyc,,
-compose/config/serialize.pyc,,
-compose/config/errors.pyc,,
-compose/config/__init__.pyc,,
-compose/cli/errors.pyc,,
-compose/errors.pyc,,
-compose/cli/command.pyc,,
-compose/cli/utils.pyc,,
-compose/volume.pyc,,
-compose/cli/docker_client.pyc,,
-compose/bundle.pyc,,
-compose/cli/docopt_command.pyc,,
-compose/container.pyc,,
-compose/cli/log_printer.pyc,,
-compose/config/sort_services.pyc,,
-compose/cli/main.pyc,,
-compose/project.pyc,,
-compose/cli/__init__.pyc,,
-compose/config/interpolation.pyc,,
-compose/__main__.pyc,,
-compose/__init__.pyc,,
-compose/progress_stream.pyc,,
-compose/parallel.pyc,,
-compose/cli/verbose_proxy.pyc,,
-compose/cli/colors.pyc,,
-compose/config/validation.pyc,,
-compose/config/environment.pyc,,
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/WHEEL b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/WHEEL
deleted file mode 100644
index 79e7cc9..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.30.0.a0)
-Root-Is-Purelib: true
-Tag: cp27-none-any
-
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/entry_points.txt b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/entry_points.txt
deleted file mode 100644
index b6dbac9..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/entry_points.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-
-    [console_scripts]
-    docker-compose=compose.cli.main:main
-    
\ No newline at end of file
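The deleted entry_points.txt is what setuptools generates from a declaration like this in the project's setup.py (an illustrative sketch, not docker-compose's actual setup.py)::

    from setuptools import setup

    setup(
        name='docker-compose',
        # Maps the `docker-compose` console command to compose.cli.main:main.
        entry_points={
            'console_scripts': ['docker-compose=compose.cli.main:main'],
        },
    )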
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/metadata.json b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/metadata.json
deleted file mode 100644
index c2b1189..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4"], "extensions": {"python.commands": {"wrap_console": {"docker-compose": "compose.cli.main:main"}}, "python.details": {"contacts": [{"name": "Docker, Inc.", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://www.docker.com/"}}, "python.exports": {"console_scripts": {"docker-compose": "compose.cli.main:main"}}}, "extras": [], "generator": "bdist_wheel (0.30.0.a0)", "license": "Apache License 2.0", "metadata_version": "2.0", "name": "docker-compose", "run_requires": [{"requires": ["PyYAML (<4,>=3.10)", "cached-property (>=1.2.0,<2)", "docker-py (>=1.9.0,<2.0)", "dockerpty (<0.5,>=0.4.1)", "docopt (<0.7,>=0.6.1)", "enum34 (>=1.0.4,<2)", "jsonschema (>=2.5.1,<3)", "requests (>=2.6.1,<2.8)", "six (>=1.3.0,<2)", "texttable (>=0.8.1,<0.9)", "websocket-client (>=0.32.0,<1.0)"]}], "summary": "Multi-container orchestration for Docker", "test_requires": [{"requires": ["mock (>=1.0.1)", "pytest"]}], "version": "1.8.0"}
\ No newline at end of file
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/pbr.json b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/pbr.json
deleted file mode 100644
index 2140742..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/pbr.json
+++ /dev/null
@@ -1 +0,0 @@
-{"is_release": false, "git_version": "6c29830"}
\ No newline at end of file
diff --git a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/top_level.txt b/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/top_level.txt
deleted file mode 100644
index d66d399..0000000
--- a/env2/lib/python2.7/site-packages/docker_compose-1.8.0.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-compose
diff --git a/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/DESCRIPTION.rst b/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/DESCRIPTION.rst
deleted file mode 100644
index 06d1a8e..0000000
--- a/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-docker-py
-=========
-
-|Build Status|
-
-A Python library for the Docker Remote API. It does everything the
-``docker`` command does, but from within Python – run containers, manage
-them, pull/push images, etc.
-
-Installation
-------------
-
-The latest stable version is always available on PyPI.
-
-::
-
-    pip install docker-py
-
-Documentation
--------------
-
-|Documentation Status|
-
-`Read the full documentation
-here <https://docker-py.readthedocs.io/en/latest/>`__. The source is
-available in the ``docs/`` directory.
-
-License
--------
-
-Docker is licensed under the Apache License, Version 2.0. See LICENSE
-for full license text
-
-.. |Build Status| image:: https://travis-ci.org/docker/docker-py.png
-   :target: https://travis-ci.org/docker/docker-py
-.. |Documentation Status| image:: https://readthedocs.org/projects/docker-py/badge/?version=latest
-   :target: https://readthedocs.org/projects/docker-py/?badge=latest
-
-
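A minimal usage sketch matching the README above, assuming docker-py 1.10.x and a Docker daemon on the default unix socket::

    from docker import Client

    cli = Client(base_url='unix://var/run/docker.sock')
    print(cli.version()['ApiVersion'])
    print([c['Names'] for c in cli.containers()])
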
diff --git a/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/INSTALLER b/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/METADATA b/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/METADATA
deleted file mode 100644
index ecf839b..0000000
--- a/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/METADATA
+++ /dev/null
@@ -1,69 +0,0 @@
-Metadata-Version: 2.0
-Name: docker-py
-Version: 1.10.6
-Summary: Python client for Docker.
-Home-page: https://github.com/docker/docker-py/
-Author: Joffrey F
-Author-email: joffrey@docker.com
-License: UNKNOWN
-Platform: UNKNOWN
-Classifier: Development Status :: 4 - Beta
-Classifier: Environment :: Other Environment
-Classifier: Intended Audience :: Developers
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Topic :: Utilities
-Classifier: License :: OSI Approved :: Apache Software License
-Requires-Dist: docker-pycreds (>=0.2.1)
-Requires-Dist: requests (>=2.5.2,!=2.11.0)
-Requires-Dist: six (>=1.4.0)
-Requires-Dist: websocket-client (>=0.32.0)
-Requires-Dist: ipaddress (>=1.0.16); python_version < "3.3"
-Requires-Dist: backports.ssl-match-hostname (>=3.5); python_version < "3.5"
-
-docker-py
-=========
-
-|Build Status|
-
-A Python library for the Docker Remote API. It does everything the
-``docker`` command does, but from within Python – run containers, manage
-them, pull/push images, etc.
-
-Installation
-------------
-
-The latest stable version is always available on PyPI.
-
-::
-
-    pip install docker-py
-
-Documentation
--------------
-
-|Documentation Status|
-
-`Read the full documentation
-here <https://docker-py.readthedocs.io/en/latest/>`__. The source is
-available in the ``docs/`` directory.
-
-License
--------
-
-Docker is licensed under the Apache License, Version 2.0. See LICENSE
-for full license text
-
-.. |Build Status| image:: https://travis-ci.org/docker/docker-py.png
-   :target: https://travis-ci.org/docker/docker-py
-.. |Documentation Status| image:: https://readthedocs.org/projects/docker-py/badge/?version=latest
-   :target: https://readthedocs.org/projects/docker-py/?badge=latest
-
-
diff --git a/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/RECORD b/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/RECORD
deleted file mode 100644
index 38dd04f..0000000
--- a/env2/lib/python2.7/site-packages/docker_py-1.10.6.dist-info/RECORD
+++ /dev/null
@@ -1,79 +0,0 @@
-docker/__init__.py,sha256=t4Je1DhG_m_o8j3KttwsAPIA1lZPvByNUL5McbDBXSA,162
-docker/client.py,sha256=Dr6hvbHjSoKDWFaY_izh7rNjKTPitZtS9bYUt7aoFMM,14505
-docker/constants.py,sha256=9ydLQmdHPeAbg7Xzwm1mHqF-I08Oqard-C7uF17FbTE,489
-docker/errors.py,sha256=Xq8KWvTajV24k0KYQCC4eT2qo4Fz18vw2cW-5FWATNI,1871
-docker/tls.py,sha256=u7eNySocRScoMjKPGIr4zRawfwuh4DHl7-4lSy2MQ5c,2595
-docker/version.py,sha256=z0Q4ivcptJ75GrSGMNQznFmPEoAV_RRbmxSz6rXdd3U,92
-docker/api/__init__.py,sha256=3aLcftoC-ejG5Dw7XRX1gLEAJ-TNY2R-_P28yYIGAvM,334
-docker/api/build.py,sha256=49zeu31gxM9iZtDtch1TyqK4c-lLAmjUEW7tSnewarM,5157
-docker/api/container.py,sha256=VA1c3Rusk7GStimvJkE7f-eVSbyw6iuLGa1-2aOo0lU,17124
-docker/api/daemon.py,sha256=TJonPi8WIn6IhDRTPUdqlyJ3xDhz74xMWk0BDlQAPCM,2679
-docker/api/exec_api.py,sha256=Q5HkF9DpEueV6Y0IY-Wv_aEudxoyEG4t1VtuoF5h29s,2556
-docker/api/image.py,sha256=iJ9iYqzWxdBwXfhcUbWw2K3-KAa01WTMVkrq5fQKM0Q,8886
-docker/api/network.py,sha256=5GBNi62PBfTSV2IerogKFGcbB4cWYglX8eJ3jAj3TZ4,3720
-docker/api/service.py,sha256=v5dWek01JO2YsObapXLCpw5H6lPUCNGvrrSIps71M4w,3760
-docker/api/swarm.py,sha256=VczPgYNSPoPZ5Vfxqx9SL0wPpNjjoARRQ5vJ1HPCSX8,2679
-docker/api/volume.py,sha256=SdV_PiZ1D22hQ0oyUyPreZHgkGYe7jloZFKfYmyHNg0,1562
-docker/auth/__init__.py,sha256=7bj7rEAuDg5z9heROSFC075-FWCJcUrNeCEpKNcBYJ4,157
-docker/auth/auth.py,sha256=YQzwhVKH2yQhap5pBH6-3rer-FIccDo5wFHAae-3Jio,9949
-docker/ssladapter/__init__.py,sha256=Es-OKMX3UouN-l1kZolFJhPdzgQZ11hCs1cEfxCZ-kg,50
-docker/ssladapter/ssladapter.py,sha256=HHrS1275s3RlqW07NVYfwOiz7u21mSQEw8ep-JJ65Nk,2355
-docker/transport/__init__.py,sha256=D4-MAqRUIow4T54yHyrENEsmAl01eKk1cknBgTKIhsE,163
-docker/transport/npipeconn.py,sha256=7RSNjoLxcaoc3vgmcB-IqY7tptlaIG7VQ6ac-ZJvnr4,3260
-docker/transport/npipesocket.py,sha256=tV8LvkgxSFjfjaWwT96Bdp6JJCSt-iy7aHtB1VdzNAU,5856
-docker/transport/unixconn.py,sha256=9aTPjdRQh1SIrvUyDZMneRP7Dgb4-5yPFlRkR3oaz78,2674
-docker/types/__init__.py,sha256=izknOdpwO8FsszlLsw-GNwMo0TWj-FzqpvmdeHB5lW4,226
-docker/types/base.py,sha256=piWvrWGsYC7GAm6QdfkDmwG90jcYc7ROaNCHLejbzpQ,130
-docker/types/containers.py,sha256=Plw4J9tNcYTHmQEOwdvprW992yRFq1UhIK8um6XLNfk,2213
-docker/types/services.py,sha256=MQPSBDefO-JbkunFGX-tHjHbB9kJ6qaHBNXVaPe5ySU,5715
-docker/types/swarm.py,sha256=eYtjT3OlAGR1I0QJpIbCqDNjQ8S9sz8l4RCfXK5Syr8,1590
-docker/utils/__init__.py,sha256=rdIH3Nhtwi0ky9lDgWLwizwRWe2pnp3gTmtxRWzUHlY,628
-docker/utils/decorators.py,sha256=LdmBeFNnBiD406cbaSwG7R8_OBBchBsK9BTvKxQnJ6w,1546
-docker/utils/socket.py,sha256=mYlzmYhLv8l5FqL3-EzeDLqWB-JysS54TCImRwNmrpQ,1685
-docker/utils/types.py,sha256=gMFyrx0THXa7BOB-cSQBsPsxggatwYcJkRjKHwpLMRg,220
-docker/utils/utils.py,sha256=V8lPg34rQn519NsgGwuArq_TznOVPotPXCF9S4F3KAg,35969
-docker/utils/ports/__init__.py,sha256=WVp6voMB16-Og9Ry_lDi4rhn6SczIRfJR3nA5cIG8C4,78
-docker/utils/ports/ports.py,sha256=eermjlGxh3BaJgtBgQ-XadC_ywzFEhA6ETd6NgRJHoE,2847
-docker_py-1.10.6.dist-info/DESCRIPTION.rst,sha256=eOjNMoycF5k24eRWoTGkX1ye2WnjvEesrNBw7AzrnaI,914
-docker_py-1.10.6.dist-info/METADATA,sha256=DGvMTavLSX8T7-C3fbSSYR4KHtYnri_UMuQmrlexc8I,2091
-docker_py-1.10.6.dist-info/RECORD,,
-docker_py-1.10.6.dist-info/WHEEL,sha256=GrqQvamwgBV4nLoJe0vhYRSWzWsx7xjlt74FT0SWYfE,110
-docker_py-1.10.6.dist-info/metadata.json,sha256=ZrIbltkadLOjIe0JZG6FEev1KqQFv0VW9BXzpcHwXLM,1417
-docker_py-1.10.6.dist-info/top_level.txt,sha256=ANFR59OS5o4sdWpvxCZAAG3cCpjTfbo_kKe3P2MYi70,7
-docker_py-1.10.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-docker/types/containers.py