From 8fdc8e7a3d1f155f3df714fcd442e07a4c98efef Mon Sep 17 00:00:00 2001
From: Computerboer <58400197+ComputerBoer@users.noreply.github.com>
Date: Sat, 13 Jul 2024 00:18:30 +0200
Subject: [PATCH] Delete env directory
---
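Notes: this patch drops a Python virtual environment ("env/") that had been
committed into the repository. Virtualenvs are machine-specific artifacts and
are normally recreated locally rather than versioned. A minimal sketch of the
equivalent cleanup, assuming the standard git CLI and that the virtualenv
lives at "env/":

    git rm -r --cached env     # stop tracking the directory; keeps the local copy
    echo "env/" >> .gitignore  # keep it out of future commits
    git commit -m "Delete env directory"

Running "pip freeze > requirements.txt" inside the env beforehand lets
collaborators rebuild it later with "python -m venv env" and
"pip install -r requirements.txt".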
env/Lib/site-packages/easy_install.py | 5 -
.../pip-9.0.1.dist-info/DESCRIPTION.rst | 39 -
.../pip-9.0.1.dist-info/INSTALLER | 1 -
.../pip-9.0.1.dist-info/METADATA | 69 -
.../site-packages/pip-9.0.1.dist-info/RECORD | 501 --
.../site-packages/pip-9.0.1.dist-info/WHEEL | 6 -
.../pip-9.0.1.dist-info/entry_points.txt | 5 -
.../pip-9.0.1.dist-info/metadata.json | 1 -
.../pip-9.0.1.dist-info/top_level.txt | 1 -
env/Lib/site-packages/pip/__init__.py | 331 -
env/Lib/site-packages/pip/__main__.py | 19 -
env/Lib/site-packages/pip/_vendor/__init__.py | 107 -
env/Lib/site-packages/pip/_vendor/appdirs.py | 552 --
.../pip/_vendor/cachecontrol/__init__.py | 11 -
.../pip/_vendor/cachecontrol/_cmd.py | 60 -
.../pip/_vendor/cachecontrol/adapter.py | 125 -
.../pip/_vendor/cachecontrol/cache.py | 39 -
.../_vendor/cachecontrol/caches/__init__.py | 18 -
.../_vendor/cachecontrol/caches/file_cache.py | 116 -
.../cachecontrol/caches/redis_cache.py | 41 -
.../pip/_vendor/cachecontrol/compat.py | 20 -
.../pip/_vendor/cachecontrol/controller.py | 353 -
.../pip/_vendor/cachecontrol/filewrapper.py | 78 -
.../pip/_vendor/cachecontrol/heuristics.py | 138 -
.../pip/_vendor/cachecontrol/serialize.py | 196 -
.../pip/_vendor/cachecontrol/wrapper.py | 21 -
.../pip/_vendor/colorama/__init__.py | 7 -
.../pip/_vendor/colorama/ansi.py | 102 -
.../pip/_vendor/colorama/ansitowin32.py | 236 -
.../pip/_vendor/colorama/initialise.py | 82 -
.../pip/_vendor/colorama/win32.py | 154 -
.../pip/_vendor/colorama/winterm.py | 162 -
.../pip/_vendor/distlib/__init__.py | 23 -
.../pip/_vendor/distlib/_backport/__init__.py | 6 -
.../pip/_vendor/distlib/_backport/misc.py | 41 -
.../pip/_vendor/distlib/_backport/shutil.py | 761 ---
.../_vendor/distlib/_backport/sysconfig.cfg | 84 -
.../_vendor/distlib/_backport/sysconfig.py | 788 ---
.../pip/_vendor/distlib/_backport/tarfile.py | 2607 --------
.../pip/_vendor/distlib/compat.py | 1111 ----
.../pip/_vendor/distlib/database.py | 1312 ----
.../pip/_vendor/distlib/index.py | 515 --
.../pip/_vendor/distlib/locators.py | 1283 ----
.../pip/_vendor/distlib/manifest.py | 393 --
.../pip/_vendor/distlib/markers.py | 190 -
.../pip/_vendor/distlib/metadata.py | 1068 ----
.../pip/_vendor/distlib/resources.py | 355 -
.../pip/_vendor/distlib/scripts.py | 384 --
.../site-packages/pip/_vendor/distlib/t32.exe | Bin 89088 -> 0 bytes
.../site-packages/pip/_vendor/distlib/t64.exe | Bin 97792 -> 0 bytes
.../site-packages/pip/_vendor/distlib/util.py | 1611 -----
.../pip/_vendor/distlib/version.py | 742 ---
.../site-packages/pip/_vendor/distlib/w32.exe | Bin 85504 -> 0 bytes
.../site-packages/pip/_vendor/distlib/w64.exe | Bin 94208 -> 0 bytes
.../pip/_vendor/distlib/wheel.py | 978 ---
env/Lib/site-packages/pip/_vendor/distro.py | 1081 ----
.../pip/_vendor/html5lib/__init__.py | 25 -
.../pip/_vendor/html5lib/_ihatexml.py | 288 -
.../pip/_vendor/html5lib/_inputstream.py | 923 ---
.../pip/_vendor/html5lib/_tokenizer.py | 1721 -----
.../pip/_vendor/html5lib/_trie/__init__.py | 14 -
.../pip/_vendor/html5lib/_trie/_base.py | 38 -
.../pip/_vendor/html5lib/_trie/datrie.py | 44 -
.../pip/_vendor/html5lib/_trie/py.py | 67 -
.../pip/_vendor/html5lib/_utils.py | 127 -
.../pip/_vendor/html5lib/constants.py | 2945 ---------
.../pip/_vendor/html5lib/filters/__init__.py | 0
.../filters/alphabeticalattributes.py | 20 -
.../pip/_vendor/html5lib/filters/base.py | 12 -
.../html5lib/filters/inject_meta_charset.py | 65 -
.../pip/_vendor/html5lib/filters/lint.py | 81 -
.../_vendor/html5lib/filters/optionaltags.py | 206 -
.../pip/_vendor/html5lib/filters/sanitizer.py | 865 ---
.../_vendor/html5lib/filters/whitespace.py | 38 -
.../pip/_vendor/html5lib/html5parser.py | 2733 --------
.../pip/_vendor/html5lib/serializer.py | 334 -
.../_vendor/html5lib/treeadapters/__init__.py | 12 -
.../_vendor/html5lib/treeadapters/genshi.py | 47 -
.../pip/_vendor/html5lib/treeadapters/sax.py | 44 -
.../_vendor/html5lib/treebuilders/__init__.py | 76 -
.../pip/_vendor/html5lib/treebuilders/base.py | 383 --
.../pip/_vendor/html5lib/treebuilders/dom.py | 236 -
.../_vendor/html5lib/treebuilders/etree.py | 340 -
.../html5lib/treebuilders/etree_lxml.py | 367 --
.../_vendor/html5lib/treewalkers/__init__.py | 143 -
.../pip/_vendor/html5lib/treewalkers/base.py | 150 -
.../pip/_vendor/html5lib/treewalkers/dom.py | 43 -
.../pip/_vendor/html5lib/treewalkers/etree.py | 137 -
.../html5lib/treewalkers/etree_lxml.py | 213 -
.../_vendor/html5lib/treewalkers/genshi.py | 69 -
.../site-packages/pip/_vendor/ipaddress.py | 2425 -------
.../pip/_vendor/lockfile/__init__.py | 347 -
.../pip/_vendor/lockfile/linklockfile.py | 73 -
.../pip/_vendor/lockfile/mkdirlockfile.py | 84 -
.../pip/_vendor/lockfile/pidlockfile.py | 190 -
.../pip/_vendor/lockfile/sqlitelockfile.py | 156 -
.../pip/_vendor/lockfile/symlinklockfile.py | 70 -
.../site-packages/pip/_vendor/ordereddict.py | 127 -
.../pip/_vendor/packaging/__about__.py | 21 -
.../pip/_vendor/packaging/__init__.py | 14 -
.../pip/_vendor/packaging/_compat.py | 30 -
.../pip/_vendor/packaging/_structures.py | 68 -
.../pip/_vendor/packaging/markers.py | 303 -
.../pip/_vendor/packaging/requirements.py | 129 -
.../pip/_vendor/packaging/specifiers.py | 774 ---
.../pip/_vendor/packaging/utils.py | 14 -
.../pip/_vendor/packaging/version.py | 393 --
.../pip/_vendor/pkg_resources/__init__.py | 3052 ---------
.../pip/_vendor/progress/__init__.py | 123 -
.../site-packages/pip/_vendor/progress/bar.py | 83 -
.../pip/_vendor/progress/counter.py | 47 -
.../pip/_vendor/progress/helpers.py | 91 -
.../pip/_vendor/progress/spinner.py | 40 -
.../site-packages/pip/_vendor/pyparsing.py | 5696 -----------------
.../site-packages/pip/_vendor/re-vendor.py | 34 -
.../pip/_vendor/requests/__init__.py | 88 -
.../pip/_vendor/requests/adapters.py | 503 --
.../site-packages/pip/_vendor/requests/api.py | 148 -
.../pip/_vendor/requests/auth.py | 252 -
.../pip/_vendor/requests/cacert.pem | 5616 ----------------
.../pip/_vendor/requests/certs.py | 25 -
.../pip/_vendor/requests/compat.py | 68 -
.../pip/_vendor/requests/cookies.py | 540 --
.../pip/_vendor/requests/exceptions.py | 114 -
.../pip/_vendor/requests/hooks.py | 34 -
.../pip/_vendor/requests/models.py | 873 ---
.../pip/_vendor/requests/sessions.py | 712 ---
.../pip/_vendor/requests/status_codes.py | 91 -
.../pip/_vendor/requests/structures.py | 105 -
.../pip/_vendor/requests/utils.py | 817 ---
env/Lib/site-packages/pip/_vendor/retrying.py | 267 -
env/Lib/site-packages/pip/_vendor/six.py | 868 ---
.../pip/_vendor/webencodings/__init__.py | 342 -
.../pip/_vendor/webencodings/labels.py | 231 -
.../pip/_vendor/webencodings/mklabels.py | 59 -
.../pip/_vendor/webencodings/tests.py | 153 -
.../_vendor/webencodings/x_user_defined.py | 325 -
env/Lib/site-packages/pip/basecommand.py | 337 -
env/Lib/site-packages/pip/baseparser.py | 293 -
env/Lib/site-packages/pip/cmdoptions.py | 633 --
.../site-packages/pip/commands/__init__.py | 86 -
env/Lib/site-packages/pip/commands/check.py | 39 -
.../site-packages/pip/commands/completion.py | 81 -
.../site-packages/pip/commands/download.py | 212 -
env/Lib/site-packages/pip/commands/freeze.py | 87 -
env/Lib/site-packages/pip/commands/hash.py | 57 -
env/Lib/site-packages/pip/commands/help.py | 35 -
env/Lib/site-packages/pip/commands/install.py | 437 --
env/Lib/site-packages/pip/commands/list.py | 337 -
env/Lib/site-packages/pip/commands/search.py | 133 -
env/Lib/site-packages/pip/commands/show.py | 154 -
.../site-packages/pip/commands/uninstall.py | 76 -
env/Lib/site-packages/pip/commands/wheel.py | 208 -
env/Lib/site-packages/pip/compat/__init__.py | 164 -
.../site-packages/pip/compat/dictconfig.py | 565 --
env/Lib/site-packages/pip/download.py | 906 ---
env/Lib/site-packages/pip/exceptions.py | 244 -
env/Lib/site-packages/pip/index.py | 1102 ----
env/Lib/site-packages/pip/locations.py | 182 -
env/Lib/site-packages/pip/models/__init__.py | 4 -
env/Lib/site-packages/pip/models/index.py | 16 -
.../site-packages/pip/operations/__init__.py | 0
env/Lib/site-packages/pip/operations/check.py | 49 -
.../site-packages/pip/operations/freeze.py | 132 -
env/Lib/site-packages/pip/pep425tags.py | 324 -
env/Lib/site-packages/pip/req/__init__.py | 10 -
env/Lib/site-packages/pip/req/req_file.py | 342 -
env/Lib/site-packages/pip/req/req_install.py | 1204 ----
env/Lib/site-packages/pip/req/req_set.py | 798 ---
.../site-packages/pip/req/req_uninstall.py | 195 -
env/Lib/site-packages/pip/status_codes.py | 8 -
env/Lib/site-packages/pip/utils/__init__.py | 852 ---
env/Lib/site-packages/pip/utils/appdirs.py | 248 -
env/Lib/site-packages/pip/utils/build.py | 42 -
.../site-packages/pip/utils/deprecation.py | 76 -
env/Lib/site-packages/pip/utils/encoding.py | 31 -
env/Lib/site-packages/pip/utils/filesystem.py | 28 -
env/Lib/site-packages/pip/utils/glibc.py | 81 -
env/Lib/site-packages/pip/utils/hashes.py | 92 -
env/Lib/site-packages/pip/utils/logging.py | 130 -
env/Lib/site-packages/pip/utils/outdated.py | 162 -
env/Lib/site-packages/pip/utils/packaging.py | 63 -
.../pip/utils/setuptools_build.py | 8 -
env/Lib/site-packages/pip/utils/ui.py | 344 -
env/Lib/site-packages/pip/vcs/__init__.py | 366 --
env/Lib/site-packages/pip/vcs/bazaar.py | 116 -
env/Lib/site-packages/pip/vcs/git.py | 300 -
env/Lib/site-packages/pip/vcs/mercurial.py | 103 -
env/Lib/site-packages/pip/vcs/subversion.py | 269 -
env/Lib/site-packages/pip/wheel.py | 853 ---
.../site-packages/pkg_resources/__init__.py | 3051 ---------
.../pkg_resources/_vendor/__init__.py | 0
.../pkg_resources/_vendor/appdirs.py | 552 --
.../_vendor/packaging/__about__.py | 21 -
.../_vendor/packaging/__init__.py | 14 -
.../_vendor/packaging/_compat.py | 30 -
.../_vendor/packaging/_structures.py | 68 -
.../_vendor/packaging/markers.py | 287 -
.../_vendor/packaging/requirements.py | 127 -
.../_vendor/packaging/specifiers.py | 774 ---
.../pkg_resources/_vendor/packaging/utils.py | 14 -
.../_vendor/packaging/version.py | 393 --
.../pkg_resources/_vendor/pyparsing.py | 5696 -----------------
.../pkg_resources/_vendor/six.py | 868 ---
.../pkg_resources/extern/__init__.py | 73 -
.../DESCRIPTION.rst | 243 -
.../setuptools-28.8.0.dist-info/INSTALLER | 1 -
.../setuptools-28.8.0.dist-info/METADATA | 272 -
.../setuptools-28.8.0.dist-info/RECORD | 143 -
.../setuptools-28.8.0.dist-info/WHEEL | 6 -
.../dependency_links.txt | 2 -
.../entry_points.txt | 63 -
.../setuptools-28.8.0.dist-info/metadata.json | 1 -
.../setuptools-28.8.0.dist-info/top_level.txt | 3 -
.../setuptools-28.8.0.dist-info/zip-safe | 1 -
env/Lib/site-packages/setuptools/__init__.py | 160 -
.../site-packages/setuptools/archive_util.py | 173 -
env/Lib/site-packages/setuptools/cli-32.exe | Bin 65536 -> 0 bytes
env/Lib/site-packages/setuptools/cli-64.exe | Bin 74752 -> 0 bytes
env/Lib/site-packages/setuptools/cli.exe | Bin 65536 -> 0 bytes
.../setuptools/command/__init__.py | 17 -
.../site-packages/setuptools/command/alias.py | 80 -
.../setuptools/command/bdist_egg.py | 472 --
.../setuptools/command/bdist_rpm.py | 43 -
.../setuptools/command/bdist_wininst.py | 21 -
.../setuptools/command/build_ext.py | 328 -
.../setuptools/command/build_py.py | 270 -
.../setuptools/command/develop.py | 197 -
.../setuptools/command/easy_install.py | 2287 -------
.../setuptools/command/egg_info.py | 697 --
.../setuptools/command/install.py | 125 -
.../setuptools/command/install_egg_info.py | 62 -
.../setuptools/command/install_lib.py | 121 -
.../setuptools/command/install_scripts.py | 65 -
.../setuptools/command/launcher manifest.xml | 15 -
.../setuptools/command/py36compat.py | 136 -
.../setuptools/command/register.py | 10 -
.../setuptools/command/rotate.py | 66 -
.../setuptools/command/saveopts.py | 22 -
.../site-packages/setuptools/command/sdist.py | 202 -
.../setuptools/command/setopt.py | 149 -
.../site-packages/setuptools/command/test.py | 247 -
.../setuptools/command/upload.py | 38 -
.../setuptools/command/upload_docs.py | 206 -
env/Lib/site-packages/setuptools/depends.py | 217 -
env/Lib/site-packages/setuptools/dist.py | 914 ---
env/Lib/site-packages/setuptools/extension.py | 57 -
.../setuptools/extern/__init__.py | 4 -
env/Lib/site-packages/setuptools/glob.py | 176 -
env/Lib/site-packages/setuptools/gui-32.exe | Bin 65536 -> 0 bytes
env/Lib/site-packages/setuptools/gui-64.exe | Bin 75264 -> 0 bytes
env/Lib/site-packages/setuptools/gui.exe | Bin 65536 -> 0 bytes
env/Lib/site-packages/setuptools/launch.py | 35 -
.../site-packages/setuptools/lib2to3_ex.py | 62 -
env/Lib/site-packages/setuptools/monkey.py | 186 -
env/Lib/site-packages/setuptools/msvc.py | 1193 ----
.../site-packages/setuptools/namespaces.py | 93 -
.../site-packages/setuptools/package_index.py | 1115 ----
.../site-packages/setuptools/py26compat.py | 31 -
.../site-packages/setuptools/py27compat.py | 18 -
.../site-packages/setuptools/py31compat.py | 56 -
env/Lib/site-packages/setuptools/sandbox.py | 492 --
.../setuptools/script (dev).tmpl | 5 -
env/Lib/site-packages/setuptools/script.tmpl | 3 -
.../site-packages/setuptools/site-patch.py | 74 -
.../site-packages/setuptools/ssl_support.py | 250 -
.../site-packages/setuptools/unicode_utils.py | 44 -
env/Lib/site-packages/setuptools/version.py | 6 -
.../setuptools/windows_support.py | 29 -
env/Lib/tcl8.6/init.tcl | 818 ---
env/Scripts/Activate.ps1 | 51 -
env/Scripts/_asyncio.pyd | Bin 45208 -> 0 bytes
env/Scripts/_bz2.pyd | Bin 77976 -> 0 bytes
env/Scripts/_ctypes.pyd | Bin 101528 -> 0 bytes
env/Scripts/_ctypes_test.pyd | Bin 29848 -> 0 bytes
env/Scripts/_decimal.pyd | Bin 215704 -> 0 bytes
env/Scripts/_elementtree.pyd | Bin 162456 -> 0 bytes
env/Scripts/_hashlib.pyd | Bin 1042584 -> 0 bytes
env/Scripts/_lzma.pyd | Bin 183448 -> 0 bytes
env/Scripts/_msi.pyd | Bin 32408 -> 0 bytes
env/Scripts/_multiprocessing.pyd | Bin 25240 -> 0 bytes
env/Scripts/_overlapped.pyd | Bin 33944 -> 0 bytes
env/Scripts/_socket.pyd | Bin 61592 -> 0 bytes
env/Scripts/_sqlite3.pyd | Bin 64152 -> 0 bytes
env/Scripts/_ssl.pyd | Bin 1458840 -> 0 bytes
env/Scripts/_testbuffer.pyd | Bin 41112 -> 0 bytes
env/Scripts/_testcapi.pyd | Bin 74904 -> 0 bytes
env/Scripts/_testconsole.pyd | Bin 20632 -> 0 bytes
env/Scripts/_testimportmultiple.pyd | Bin 19096 -> 0 bytes
env/Scripts/_testmultiphase.pyd | Bin 25752 -> 0 bytes
env/Scripts/_tkinter.pyd | Bin 52888 -> 0 bytes
env/Scripts/activate | 76 -
env/Scripts/activate.bat | 32 -
env/Scripts/deactivate.bat | 21 -
env/Scripts/easy_install-3.6.exe | Bin 89522 -> 0 bytes
env/Scripts/easy_install.exe | Bin 89522 -> 0 bytes
env/Scripts/pip.exe | Bin 89494 -> 0 bytes
env/Scripts/pip3.6.exe | Bin 89494 -> 0 bytes
env/Scripts/pip3.exe | Bin 89494 -> 0 bytes
env/Scripts/pyexpat.pyd | Bin 162968 -> 0 bytes
env/Scripts/python.exe | Bin 97944 -> 0 bytes
env/Scripts/python3.dll | Bin 58008 -> 0 bytes
env/Scripts/python36.dll | Bin 3267224 -> 0 bytes
env/Scripts/pythonw.exe | Bin 96408 -> 0 bytes
env/Scripts/select.pyd | Bin 23192 -> 0 bytes
env/Scripts/sqlite3.dll | Bin 866456 -> 0 bytes
env/Scripts/tcl86t.dll | Bin 1307136 -> 0 bytes
env/Scripts/tk86t.dll | Bin 1550336 -> 0 bytes
env/Scripts/unicodedata.pyd | Bin 895640 -> 0 bytes
env/Scripts/vcruntime140.dll | Bin 83784 -> 0 bytes
env/Scripts/winsound.pyd | Bin 24216 -> 0 bytes
env/pip-selfcheck.json | 1 -
env/pyvenv.cfg | 3 -
313 files changed, 96938 deletions(-)
delete mode 100644 env/Lib/site-packages/easy_install.py
delete mode 100644 env/Lib/site-packages/pip-9.0.1.dist-info/DESCRIPTION.rst
delete mode 100644 env/Lib/site-packages/pip-9.0.1.dist-info/INSTALLER
delete mode 100644 env/Lib/site-packages/pip-9.0.1.dist-info/METADATA
delete mode 100644 env/Lib/site-packages/pip-9.0.1.dist-info/RECORD
delete mode 100644 env/Lib/site-packages/pip-9.0.1.dist-info/WHEEL
delete mode 100644 env/Lib/site-packages/pip-9.0.1.dist-info/entry_points.txt
delete mode 100644 env/Lib/site-packages/pip-9.0.1.dist-info/metadata.json
delete mode 100644 env/Lib/site-packages/pip-9.0.1.dist-info/top_level.txt
delete mode 100644 env/Lib/site-packages/pip/__init__.py
delete mode 100644 env/Lib/site-packages/pip/__main__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/appdirs.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/colorama/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/colorama/ansi.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/colorama/initialise.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/colorama/win32.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/colorama/winterm.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/compat.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/database.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/index.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/locators.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/manifest.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/markers.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/metadata.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/resources.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/scripts.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/t32.exe
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/t64.exe
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/util.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/version.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/w32.exe
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/w64.exe
delete mode 100644 env/Lib/site-packages/pip/_vendor/distlib/wheel.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/distro.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/_ihatexml.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/_trie/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/_trie/_base.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/_trie/datrie.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/_trie/py.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/_utils.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/constants.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/filters/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/filters/base.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/filters/lint.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/filters/optionaltags.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/filters/whitespace.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/html5parser.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/serializer.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/base.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/dom.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/base.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/dom.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/ipaddress.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/lockfile/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/lockfile/linklockfile.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/lockfile/mkdirlockfile.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/lockfile/pidlockfile.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/lockfile/sqlitelockfile.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/lockfile/symlinklockfile.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/ordereddict.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/__about__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/_compat.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/_structures.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/markers.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/requirements.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/specifiers.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/utils.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/packaging/version.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/progress/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/progress/bar.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/progress/counter.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/progress/helpers.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/progress/spinner.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/pyparsing.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/re-vendor.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/adapters.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/api.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/auth.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/cacert.pem
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/certs.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/compat.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/cookies.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/exceptions.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/hooks.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/models.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/sessions.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/status_codes.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/structures.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/requests/utils.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/retrying.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/six.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/webencodings/__init__.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/webencodings/labels.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/webencodings/mklabels.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/webencodings/tests.py
delete mode 100644 env/Lib/site-packages/pip/_vendor/webencodings/x_user_defined.py
delete mode 100644 env/Lib/site-packages/pip/basecommand.py
delete mode 100644 env/Lib/site-packages/pip/baseparser.py
delete mode 100644 env/Lib/site-packages/pip/cmdoptions.py
delete mode 100644 env/Lib/site-packages/pip/commands/__init__.py
delete mode 100644 env/Lib/site-packages/pip/commands/check.py
delete mode 100644 env/Lib/site-packages/pip/commands/completion.py
delete mode 100644 env/Lib/site-packages/pip/commands/download.py
delete mode 100644 env/Lib/site-packages/pip/commands/freeze.py
delete mode 100644 env/Lib/site-packages/pip/commands/hash.py
delete mode 100644 env/Lib/site-packages/pip/commands/help.py
delete mode 100644 env/Lib/site-packages/pip/commands/install.py
delete mode 100644 env/Lib/site-packages/pip/commands/list.py
delete mode 100644 env/Lib/site-packages/pip/commands/search.py
delete mode 100644 env/Lib/site-packages/pip/commands/show.py
delete mode 100644 env/Lib/site-packages/pip/commands/uninstall.py
delete mode 100644 env/Lib/site-packages/pip/commands/wheel.py
delete mode 100644 env/Lib/site-packages/pip/compat/__init__.py
delete mode 100644 env/Lib/site-packages/pip/compat/dictconfig.py
delete mode 100644 env/Lib/site-packages/pip/download.py
delete mode 100644 env/Lib/site-packages/pip/exceptions.py
delete mode 100644 env/Lib/site-packages/pip/index.py
delete mode 100644 env/Lib/site-packages/pip/locations.py
delete mode 100644 env/Lib/site-packages/pip/models/__init__.py
delete mode 100644 env/Lib/site-packages/pip/models/index.py
delete mode 100644 env/Lib/site-packages/pip/operations/__init__.py
delete mode 100644 env/Lib/site-packages/pip/operations/check.py
delete mode 100644 env/Lib/site-packages/pip/operations/freeze.py
delete mode 100644 env/Lib/site-packages/pip/pep425tags.py
delete mode 100644 env/Lib/site-packages/pip/req/__init__.py
delete mode 100644 env/Lib/site-packages/pip/req/req_file.py
delete mode 100644 env/Lib/site-packages/pip/req/req_install.py
delete mode 100644 env/Lib/site-packages/pip/req/req_set.py
delete mode 100644 env/Lib/site-packages/pip/req/req_uninstall.py
delete mode 100644 env/Lib/site-packages/pip/status_codes.py
delete mode 100644 env/Lib/site-packages/pip/utils/__init__.py
delete mode 100644 env/Lib/site-packages/pip/utils/appdirs.py
delete mode 100644 env/Lib/site-packages/pip/utils/build.py
delete mode 100644 env/Lib/site-packages/pip/utils/deprecation.py
delete mode 100644 env/Lib/site-packages/pip/utils/encoding.py
delete mode 100644 env/Lib/site-packages/pip/utils/filesystem.py
delete mode 100644 env/Lib/site-packages/pip/utils/glibc.py
delete mode 100644 env/Lib/site-packages/pip/utils/hashes.py
delete mode 100644 env/Lib/site-packages/pip/utils/logging.py
delete mode 100644 env/Lib/site-packages/pip/utils/outdated.py
delete mode 100644 env/Lib/site-packages/pip/utils/packaging.py
delete mode 100644 env/Lib/site-packages/pip/utils/setuptools_build.py
delete mode 100644 env/Lib/site-packages/pip/utils/ui.py
delete mode 100644 env/Lib/site-packages/pip/vcs/__init__.py
delete mode 100644 env/Lib/site-packages/pip/vcs/bazaar.py
delete mode 100644 env/Lib/site-packages/pip/vcs/git.py
delete mode 100644 env/Lib/site-packages/pip/vcs/mercurial.py
delete mode 100644 env/Lib/site-packages/pip/vcs/subversion.py
delete mode 100644 env/Lib/site-packages/pip/wheel.py
delete mode 100644 env/Lib/site-packages/pkg_resources/__init__.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/__init__.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/appdirs.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/__about__.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/__init__.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/_compat.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/_structures.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/markers.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/requirements.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/specifiers.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/utils.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/packaging/version.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/pyparsing.py
delete mode 100644 env/Lib/site-packages/pkg_resources/_vendor/six.py
delete mode 100644 env/Lib/site-packages/pkg_resources/extern/__init__.py
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/DESCRIPTION.rst
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/INSTALLER
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/METADATA
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/RECORD
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/WHEEL
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/dependency_links.txt
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/entry_points.txt
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/metadata.json
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/top_level.txt
delete mode 100644 env/Lib/site-packages/setuptools-28.8.0.dist-info/zip-safe
delete mode 100644 env/Lib/site-packages/setuptools/__init__.py
delete mode 100644 env/Lib/site-packages/setuptools/archive_util.py
delete mode 100644 env/Lib/site-packages/setuptools/cli-32.exe
delete mode 100644 env/Lib/site-packages/setuptools/cli-64.exe
delete mode 100644 env/Lib/site-packages/setuptools/cli.exe
delete mode 100644 env/Lib/site-packages/setuptools/command/__init__.py
delete mode 100644 env/Lib/site-packages/setuptools/command/alias.py
delete mode 100644 env/Lib/site-packages/setuptools/command/bdist_egg.py
delete mode 100644 env/Lib/site-packages/setuptools/command/bdist_rpm.py
delete mode 100644 env/Lib/site-packages/setuptools/command/bdist_wininst.py
delete mode 100644 env/Lib/site-packages/setuptools/command/build_ext.py
delete mode 100644 env/Lib/site-packages/setuptools/command/build_py.py
delete mode 100644 env/Lib/site-packages/setuptools/command/develop.py
delete mode 100644 env/Lib/site-packages/setuptools/command/easy_install.py
delete mode 100644 env/Lib/site-packages/setuptools/command/egg_info.py
delete mode 100644 env/Lib/site-packages/setuptools/command/install.py
delete mode 100644 env/Lib/site-packages/setuptools/command/install_egg_info.py
delete mode 100644 env/Lib/site-packages/setuptools/command/install_lib.py
delete mode 100644 env/Lib/site-packages/setuptools/command/install_scripts.py
delete mode 100644 env/Lib/site-packages/setuptools/command/launcher manifest.xml
delete mode 100644 env/Lib/site-packages/setuptools/command/py36compat.py
delete mode 100644 env/Lib/site-packages/setuptools/command/register.py
delete mode 100644 env/Lib/site-packages/setuptools/command/rotate.py
delete mode 100644 env/Lib/site-packages/setuptools/command/saveopts.py
delete mode 100644 env/Lib/site-packages/setuptools/command/sdist.py
delete mode 100644 env/Lib/site-packages/setuptools/command/setopt.py
delete mode 100644 env/Lib/site-packages/setuptools/command/test.py
delete mode 100644 env/Lib/site-packages/setuptools/command/upload.py
delete mode 100644 env/Lib/site-packages/setuptools/command/upload_docs.py
delete mode 100644 env/Lib/site-packages/setuptools/depends.py
delete mode 100644 env/Lib/site-packages/setuptools/dist.py
delete mode 100644 env/Lib/site-packages/setuptools/extension.py
delete mode 100644 env/Lib/site-packages/setuptools/extern/__init__.py
delete mode 100644 env/Lib/site-packages/setuptools/glob.py
delete mode 100644 env/Lib/site-packages/setuptools/gui-32.exe
delete mode 100644 env/Lib/site-packages/setuptools/gui-64.exe
delete mode 100644 env/Lib/site-packages/setuptools/gui.exe
delete mode 100644 env/Lib/site-packages/setuptools/launch.py
delete mode 100644 env/Lib/site-packages/setuptools/lib2to3_ex.py
delete mode 100644 env/Lib/site-packages/setuptools/monkey.py
delete mode 100644 env/Lib/site-packages/setuptools/msvc.py
delete mode 100644 env/Lib/site-packages/setuptools/namespaces.py
delete mode 100644 env/Lib/site-packages/setuptools/package_index.py
delete mode 100644 env/Lib/site-packages/setuptools/py26compat.py
delete mode 100644 env/Lib/site-packages/setuptools/py27compat.py
delete mode 100644 env/Lib/site-packages/setuptools/py31compat.py
delete mode 100644 env/Lib/site-packages/setuptools/sandbox.py
delete mode 100644 env/Lib/site-packages/setuptools/script (dev).tmpl
delete mode 100644 env/Lib/site-packages/setuptools/script.tmpl
delete mode 100644 env/Lib/site-packages/setuptools/site-patch.py
delete mode 100644 env/Lib/site-packages/setuptools/ssl_support.py
delete mode 100644 env/Lib/site-packages/setuptools/unicode_utils.py
delete mode 100644 env/Lib/site-packages/setuptools/version.py
delete mode 100644 env/Lib/site-packages/setuptools/windows_support.py
delete mode 100644 env/Lib/tcl8.6/init.tcl
delete mode 100644 env/Scripts/Activate.ps1
delete mode 100644 env/Scripts/_asyncio.pyd
delete mode 100644 env/Scripts/_bz2.pyd
delete mode 100644 env/Scripts/_ctypes.pyd
delete mode 100644 env/Scripts/_ctypes_test.pyd
delete mode 100644 env/Scripts/_decimal.pyd
delete mode 100644 env/Scripts/_elementtree.pyd
delete mode 100644 env/Scripts/_hashlib.pyd
delete mode 100644 env/Scripts/_lzma.pyd
delete mode 100644 env/Scripts/_msi.pyd
delete mode 100644 env/Scripts/_multiprocessing.pyd
delete mode 100644 env/Scripts/_overlapped.pyd
delete mode 100644 env/Scripts/_socket.pyd
delete mode 100644 env/Scripts/_sqlite3.pyd
delete mode 100644 env/Scripts/_ssl.pyd
delete mode 100644 env/Scripts/_testbuffer.pyd
delete mode 100644 env/Scripts/_testcapi.pyd
delete mode 100644 env/Scripts/_testconsole.pyd
delete mode 100644 env/Scripts/_testimportmultiple.pyd
delete mode 100644 env/Scripts/_testmultiphase.pyd
delete mode 100644 env/Scripts/_tkinter.pyd
delete mode 100644 env/Scripts/activate
delete mode 100644 env/Scripts/activate.bat
delete mode 100644 env/Scripts/deactivate.bat
delete mode 100644 env/Scripts/easy_install-3.6.exe
delete mode 100644 env/Scripts/easy_install.exe
delete mode 100644 env/Scripts/pip.exe
delete mode 100644 env/Scripts/pip3.6.exe
delete mode 100644 env/Scripts/pip3.exe
delete mode 100644 env/Scripts/pyexpat.pyd
delete mode 100644 env/Scripts/python.exe
delete mode 100644 env/Scripts/python3.dll
delete mode 100644 env/Scripts/python36.dll
delete mode 100644 env/Scripts/pythonw.exe
delete mode 100644 env/Scripts/select.pyd
delete mode 100644 env/Scripts/sqlite3.dll
delete mode 100644 env/Scripts/tcl86t.dll
delete mode 100644 env/Scripts/tk86t.dll
delete mode 100644 env/Scripts/unicodedata.pyd
delete mode 100644 env/Scripts/vcruntime140.dll
delete mode 100644 env/Scripts/winsound.pyd
delete mode 100644 env/pip-selfcheck.json
delete mode 100644 env/pyvenv.cfg
diff --git a/env/Lib/site-packages/easy_install.py b/env/Lib/site-packages/easy_install.py
deleted file mode 100644
index d87e984..0000000
--- a/env/Lib/site-packages/easy_install.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Run the EasyInstall command"""
-
-if __name__ == '__main__':
- from setuptools.command.easy_install import main
- main()
diff --git a/env/Lib/site-packages/pip-9.0.1.dist-info/DESCRIPTION.rst b/env/Lib/site-packages/pip-9.0.1.dist-info/DESCRIPTION.rst
deleted file mode 100644
index 8ef94c4..0000000
--- a/env/Lib/site-packages/pip-9.0.1.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-pip
-===
-
-The `PyPA recommended
-<https://packaging.python.org/en/latest/current/>`_
-tool for installing Python packages.
-
-* `Installation <https://pip.pypa.io/en/stable/installing.html>`_
-* `Documentation <https://pip.pypa.io/>`_
-* `Changelog <https://pip.pypa.io/en/stable/news.html>`_
-* `Github Page <https://github.com/pypa/pip>`_
-* `Issue Tracking <https://github.com/pypa/pip/issues>`_
-* `User mailing list <http://groups.google.com/group/python-virtualenv>`_
-* `Dev mailing list <http://groups.google.com/group/pypa-dev>`_
-* User IRC: #pypa on Freenode.
-* Dev IRC: #pypa-dev on Freenode.
-
-
-.. image:: https://img.shields.io/pypi/v/pip.svg
- :target: https://pypi.python.org/pypi/pip
-
-.. image:: https://img.shields.io/travis/pypa/pip/master.svg
- :target: http://travis-ci.org/pypa/pip
-
-.. image:: https://img.shields.io/appveyor/ci/pypa/pip.svg
- :target: https://ci.appveyor.com/project/pypa/pip/history
-
-.. image:: https://readthedocs.org/projects/pip/badge/?version=stable
- :target: https://pip.pypa.io/en/stable
-
-Code of Conduct
----------------
-
-Everyone interacting in the pip project's codebases, issue trackers, chat
-rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.
-
-.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/
-
-
diff --git a/env/Lib/site-packages/pip-9.0.1.dist-info/INSTALLER b/env/Lib/site-packages/pip-9.0.1.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/env/Lib/site-packages/pip-9.0.1.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/env/Lib/site-packages/pip-9.0.1.dist-info/METADATA b/env/Lib/site-packages/pip-9.0.1.dist-info/METADATA
deleted file mode 100644
index 600a905..0000000
--- a/env/Lib/site-packages/pip-9.0.1.dist-info/METADATA
+++ /dev/null
@@ -1,69 +0,0 @@
-Metadata-Version: 2.0
-Name: pip
-Version: 9.0.1
-Summary: The PyPA recommended tool for installing Python packages.
-Home-page: https://pip.pypa.io/
-Author: The pip developers
-Author-email: python-virtualenv@groups.google.com
-License: MIT
-Keywords: easy_install distutils setuptools egg virtualenv
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Topic :: Software Development :: Build Tools
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Requires-Python: >=2.6,!=3.0.*,!=3.1.*,!=3.2.*
-Provides-Extra: testing
-Requires-Dist: mock; extra == 'testing'
-Requires-Dist: pretend; extra == 'testing'
-Requires-Dist: pytest; extra == 'testing'
-Requires-Dist: scripttest (>=1.3); extra == 'testing'
-Requires-Dist: virtualenv (>=1.10); extra == 'testing'
-
-pip
-===
-
-The `PyPA recommended
-<https://packaging.python.org/en/latest/current/>`_
-tool for installing Python packages.
-
-* `Installation <https://pip.pypa.io/en/stable/installing.html>`_
-* `Documentation <https://pip.pypa.io/>`_
-* `Changelog <https://pip.pypa.io/en/stable/news.html>`_
-* `Github Page <https://github.com/pypa/pip>`_
-* `Issue Tracking <https://github.com/pypa/pip/issues>`_
-* `User mailing list <http://groups.google.com/group/python-virtualenv>`_
-* `Dev mailing list <http://groups.google.com/group/pypa-dev>`_
-* User IRC: #pypa on Freenode.
-* Dev IRC: #pypa-dev on Freenode.
-
-
-.. image:: https://img.shields.io/pypi/v/pip.svg
- :target: https://pypi.python.org/pypi/pip
-
-.. image:: https://img.shields.io/travis/pypa/pip/master.svg
- :target: http://travis-ci.org/pypa/pip
-
-.. image:: https://img.shields.io/appveyor/ci/pypa/pip.svg
- :target: https://ci.appveyor.com/project/pypa/pip/history
-
-.. image:: https://readthedocs.org/projects/pip/badge/?version=stable
- :target: https://pip.pypa.io/en/stable
-
-Code of Conduct
----------------
-
-Everyone interacting in the pip project's codebases, issue trackers, chat
-rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.
-
-.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/
-
-
diff --git a/env/Lib/site-packages/pip-9.0.1.dist-info/RECORD b/env/Lib/site-packages/pip-9.0.1.dist-info/RECORD
deleted file mode 100644
index 9ad92f4..0000000
--- a/env/Lib/site-packages/pip-9.0.1.dist-info/RECORD
+++ /dev/null
@@ -1,501 +0,0 @@
-pip/__init__.py,sha256=00QWSreEBjb8Y8sPs8HeqgLXSB-3UrONJxo4J5APxEc,11348
-pip/__main__.py,sha256=V6Kh-IEDEFpt1cahRE6MajUF_14qJR_Qsvn4MjWZXzE,584
-pip/basecommand.py,sha256=TTlmZesQ4Vuxcto2KqwZGmgmN5ioHEl_DeFev9ie_SA,11910
-pip/baseparser.py,sha256=AKMOeF3fTrRroiv0DmTQbdiLW0DQux2KqGC_dJJB9d0,10465
-pip/cmdoptions.py,sha256=8JCcF2kKAF2cFnV77oW-3DsHJifr9jF2WuChzzwgcwg,16474
-pip/download.py,sha256=rA0wbmqC2n9ejX481YJSidmKgQqQDjdaxkHkHlAN68k,32171
-pip/exceptions.py,sha256=BvqH-Jw3tP2b-2IJ2kjrQemOAPMqKrQMLRIZHZQpJXk,8121
-pip/index.py,sha256=L6UhtAEZc2qw7BqfQrkPQcw2gCgEw3GukLRSA95BNyI,39950
-pip/locations.py,sha256=9rJRlgonC6QC2zGDIn_7mXaoZ9_tF_IHM2BQhWVRgbo,5626
-pip/pep425tags.py,sha256=q3kec4f6NHszuGYIhGIbVvs896D06uJAnKFgJ_wce44,10980
-pip/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156
-pip/wheel.py,sha256=QSWmGs2ui-n4UMWm0JUY6aMCcwNKungVzbWsxI9KlJQ,32010
-pip/_vendor/__init__.py,sha256=WaaSJ3roSSJ_Uv4yKAxlGohKEH9YUA3aIh1Xg2IjfgU,4670
-pip/_vendor/appdirs.py,sha256=-9UOIZy62ahCQVY9-b7Nn6_5_4Y6ooHnv72tM8iHi9Y,22368
-pip/_vendor/distro.py,sha256=A4Douw9pcqdYxDTp5b-OR02fxVXnfWs-wC1wA89rhRk,38349
-pip/_vendor/ipaddress.py,sha256=wimbqcE7rwwETlucn8A_4Qd_-NKXPOBcNxJHarUoXng,80176
-pip/_vendor/ordereddict.py,sha256=4KsFuc6V8IgHROCHUu-4vCrr21ZPPea7Z0cvX9AjQ7w,4094
-pip/_vendor/pyparsing.py,sha256=7vAuUVbh6txUKQR2IzJ8_9DKmD5vtm5MDssWkI0ka8o,224171
-pip/_vendor/re-vendor.py,sha256=PcdZ40d0ohMsdJmA4t0AeAWbPXi1tFsvAwA5KE5FGeY,773
-pip/_vendor/retrying.py,sha256=k3fflf5_Mm0XcIJYhB7Tj34bqCCPhUDkYbx1NvW2FPE,9972
-pip/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
-pip/_vendor/cachecontrol/__init__.py,sha256=UPyFlz0dIjxusu5ITig9UDFJdSY5LTwijhldn0AfyzU,302
-pip/_vendor/cachecontrol/_cmd.py,sha256=MPxZfZd2LKDzVrs55X3wA1rsI2YuP8evLZSwQj0dIk0,1320
-pip/_vendor/cachecontrol/adapter.py,sha256=RaGYyRA-RA1J0AnE67GzEYFPBu4YH4EQUvQqTKa57iM,4608
-pip/_vendor/cachecontrol/cache.py,sha256=xtl-V-pr9KSt9VvFDRCB9yrHPEvqvbk-5M1vAInZb5k,790
-pip/_vendor/cachecontrol/compat.py,sha256=uyovOpd1ehI3J1XeBqJvcsIp6fvkjBpoQmu_0J2st8c,416
-pip/_vendor/cachecontrol/controller.py,sha256=elDsLcaYA15ncodRmHnWQp6ekU_ocEGtDeGLbsnTjzo,13024
-pip/_vendor/cachecontrol/filewrapper.py,sha256=_K8cStmXqD33m15PfsQ8rlpo6FfXjVbKmjvLXyICRgI,2531
-pip/_vendor/cachecontrol/heuristics.py,sha256=WtJrVsyWjpP9WoUiDVdTZZRNBCz5ZVptaQpYnqofDQU,4141
-pip/_vendor/cachecontrol/serialize.py,sha256=XM6elG9DSNexwaOCgMjUtfrHHW5NAB6TSbIf3x235xs,6536
-pip/_vendor/cachecontrol/wrapper.py,sha256=Kqyu_3TW_54XDudha4-HF21vyEOAJ4ZnRXFysTiLmXA,498
-pip/_vendor/cachecontrol/caches/__init__.py,sha256=uWnUtyMvHY_LULaL_4_IR1F_xPgK5zHfJyRnBq4DnPE,369
-pip/_vendor/cachecontrol/caches/file_cache.py,sha256=FsDug3bwUAQ3okjjfGzxlDaBf2fwVSn1iBKMTL6SyGU,3532
-pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=XywqxkS9MkCaflTOY_wjrE02neKdywB9YwlOBbP7Ywc,973
-pip/_vendor/colorama/__init__.py,sha256=9xByrTvk9upkL5NGV5It2Eje4-kzNLwa_1lGPWpXoNU,240
-pip/_vendor/colorama/ansi.py,sha256=Fi0un-QLqRm-v7o_nKiOqyC8PapBJK7DLV_q9LKtTO0,2524
-pip/_vendor/colorama/ansitowin32.py,sha256=gJZB35Lbdjatykd2zrUUnokMzkvcFgscyn_tNxxMFHA,9668
-pip/_vendor/colorama/initialise.py,sha256=cHqVJtb82OG7HUCxvQ2joG7N_CoxbIKbI_fgryZkj20,1917
-pip/_vendor/colorama/win32.py,sha256=_SCEoTK_GA2tU1nhbayKKac-v9Jn98lCPIFOeFMGCHQ,5365
-pip/_vendor/colorama/winterm.py,sha256=V7U7ojwG1q4n6PKripjEvW_htYQi5ueXSM3LUUoqqDY,6290
-pip/_vendor/distlib/__init__.py,sha256=-aUeNNCfiIG_1Tqf19BH0xLNuBKGX1I7lNhcLYgFUEA,581
-pip/_vendor/distlib/compat.py,sha256=FzKlP9dNUMH-j_1LCVnjgx6KgUbpnRjTjYkTkDYRPlI,40801
-pip/_vendor/distlib/database.py,sha256=jniJmYk0Mj2t6gZYbnn68TvQwnVZ0kXyeuf_3AxFclk,49672
-pip/_vendor/distlib/index.py,sha256=Cw8gxFq_7xXvdgExL3efjLAY3EAPDMSL3VA42RkbQBs,21085
-pip/_vendor/distlib/locators.py,sha256=hD_Hm3aSL9DklY9Cxyct2n_74gZ0xNFFGB5L7M6ds14,51013
-pip/_vendor/distlib/manifest.py,sha256=3qEuZhHlDbvyYZ1BZbdapDAivgMgUwWpZ00cmXqcn18,14810
-pip/_vendor/distlib/markers.py,sha256=iRrVWwpyVwjkKJSX8NEQ92_MRMwpROcfNGKCD-Ch1QM,6282
-pip/_vendor/distlib/metadata.py,sha256=hUsf7Qh2Ae4CCkL33qK8ppwC8ZTzT7ep6Hj9RKpijKU,38833
-pip/_vendor/distlib/resources.py,sha256=VFBVbFqLVqDBSQDXcFQHrX1KEcuoDxTK699Ydi_beyc,10766
-pip/_vendor/distlib/scripts.py,sha256=xpehNfISGPTNxQZu02K9Rw2QbNx_2Q4emePv3W5X0iw,15224
-pip/_vendor/distlib/t32.exe,sha256=cp0UAUDDr1tGAx8adlKxWbCHIa-oB3bxev5zYzgAr8E,89088
-pip/_vendor/distlib/t64.exe,sha256=FiljDPcX9qvoe9FYE_9pNEHqbqMnhcCOuI_oLJ4F9F8,97792
-pip/_vendor/distlib/util.py,sha256=E2wU-RZShPMFUMJr9kPmemTULinM4qDzosNPihCuKE0,52991
-pip/_vendor/distlib/version.py,sha256=CgghOUylxGD7dEA2S3MvWjx7mY_2bWsluF0Of3Yxl4Y,23711
-pip/_vendor/distlib/w32.exe,sha256=LItrBJesEqt2QTQuB-yha2YbMegURHmHmdSxhjBqmnc,85504
-pip/_vendor/distlib/w64.exe,sha256=n_PioBC7ltz7sAk1WLbLzZJgS4R2axSy_0HPf8ZCsEg,94208
-pip/_vendor/distlib/wheel.py,sha256=UP53cKxOM5r7bHSS-n5prF6hwJEVsMW9ZNJutOuC26c,39115
-pip/_vendor/distlib/_backport/__init__.py,sha256=bqS_dTOH6uW9iGgd0uzfpPjo6vZ4xpPZ7kyfZJ2vNaw,274
-pip/_vendor/distlib/_backport/misc.py,sha256=KWecINdbFNOxSOP1fGF680CJnaC6S4fBRgEtaYTw0ig,971
-pip/_vendor/distlib/_backport/shutil.py,sha256=VW1t3uYqUjWZH7jV-6QiimLhnldoV5uIpH4EuiT1jfw,25647
-pip/_vendor/distlib/_backport/sysconfig.cfg,sha256=swZKxq9RY5e9r3PXCrlvQPMsvOdiWZBTHLEbqS8LJLU,2617
-pip/_vendor/distlib/_backport/sysconfig.py,sha256=eSEyJg7jxF_eHlHG8IOtl93kb07UoMIRp1wYsPeGi9k,26955
-pip/_vendor/distlib/_backport/tarfile.py,sha256=Ihp7rXRcjbIKw8COm9wSePV9ARGXbSF9gGXAMn2Q-KU,92628
-pip/_vendor/html5lib/__init__.py,sha256=JsIwmFldk-9raBadPSTS74JrfmJvozc-3aekMi7Hr9s,780
-pip/_vendor/html5lib/_ihatexml.py,sha256=tzXygYmisUmiEUt2v7E1Ab50AKQsrD-SglPRnY75vME,16705
-pip/_vendor/html5lib/_inputstream.py,sha256=C4lX5gUBwebOWy41hYP2ZBpkPVNvxk_hZBm3OVyPZM4,32532
-pip/_vendor/html5lib/_tokenizer.py,sha256=YAaOEBD6qc5ISq9Xt9Nif1OFgcybTTfMdwqBkZhpAq4,76580
-pip/_vendor/html5lib/_utils.py,sha256=bS6THVlL8ZyTcI6CIxiM6xxuHsE8i1j5Ogd3Ha1G84U,4096
-pip/_vendor/html5lib/constants.py,sha256=Dfc1Fv3_9frktgWjg4tbj-CjMMp02Ko9qMe4il1BVdo,83387
-pip/_vendor/html5lib/html5parser.py,sha256=Dmlu9hlq5w_id6mBZyY_sE5LukIACgvG4kpgIsded8Q,117170
-pip/_vendor/html5lib/serializer.py,sha256=Urrsa0cPPLqNX-UbJWS2gUhs_06qVbNxZvUnrmGZK6E,14177
-pip/_vendor/html5lib/_trie/__init__.py,sha256=8VR1bcgD2OpeS2XExpu5yBhP_Q1K-lwKbBKICBPf1kU,289
-pip/_vendor/html5lib/_trie/_base.py,sha256=6P_AcIoGjtwB2qAlhV8H4VP-ztQxoXFGwt4NyMqG_Kw,979
-pip/_vendor/html5lib/_trie/datrie.py,sha256=EQpqSfkZRuTbE-DuhW7xMdVDxdZNZ0CfmnYfHA_3zxM,1178
-pip/_vendor/html5lib/_trie/py.py,sha256=wXmQLrZRf4MyWNyg0m3h81m9InhLR7GJ002mIIZh-8o,1775
-pip/_vendor/html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/html5lib/filters/alphabeticalattributes.py,sha256=DXv-P2vdQ5F3OTWM6QZ6KhyDlAWm90pbfrD1Bk9D_l0,621
-pip/_vendor/html5lib/filters/base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286
-pip/_vendor/html5lib/filters/inject_meta_charset.py,sha256=2Q_JnMscn_tNbV_qpgYN_5M3PnBGfmuvECMKDExHUcY,2742
-pip/_vendor/html5lib/filters/lint.py,sha256=qf5cLrT6xXd8V7GH1R_3lKxIjuJSfpbWTpSwaglYdDw,3365
-pip/_vendor/html5lib/filters/optionaltags.py,sha256=EHig4kM-QiLjuxVJ3FAAFNy-10k4aV6HJbQzHKZ_3u8,10534
-pip/_vendor/html5lib/filters/sanitizer.py,sha256=7PqJrhm6mo3JvaHk2IQW7i74Or7Qtd-FV8UftJIyDys,25112
-pip/_vendor/html5lib/filters/whitespace.py,sha256=KPt067nYTqqi8KLTClyynn4eVzNDC_-MApXNVHRXVX0,1139
-pip/_vendor/html5lib/treeadapters/__init__.py,sha256=l3LcqMSEyoh99Jh_eWjGexHnIvKhLAXoP-LDz88whuM,208
-pip/_vendor/html5lib/treeadapters/genshi.py,sha256=6VIuHDNoExv1JWv3ePj6V5CM-tcyiUSWe5_Hd2ejbwY,1555
-pip/_vendor/html5lib/treeadapters/sax.py,sha256=3of4vvaUYIAic7pngebwJV24hpOS7Zg9ggJa_WQegy4,1661
-pip/_vendor/html5lib/treebuilders/__init__.py,sha256=UlB4orkTgZhFIKQdXrtiWn9cpKSsuhnOQOIHeD0Fv4k,3406
-pip/_vendor/html5lib/treebuilders/base.py,sha256=4vdjm_Z2f_GTQBwKnWlrzVcctTb-K5sfN8pXDaWODiA,13942
-pip/_vendor/html5lib/treebuilders/dom.py,sha256=SY3MsijXyzdNPc8aK5IQsupBoM8J67y56DgNtGvsb9g,8835
-pip/_vendor/html5lib/treebuilders/etree.py,sha256=aqIBOGj_dFYqBURIcTegGNBhAIJOw5iFDHb4jrkYH-8,12764
-pip/_vendor/html5lib/treebuilders/etree_lxml.py,sha256=CEgwHMIQZvIDFAqct4kqPkVtyKIm9efHFq_VeExEPCA,14161
-pip/_vendor/html5lib/treewalkers/__init__.py,sha256=CFpUOCfLuhAgVJ8NYk9wviCu1khYnv7XRStvyzU1Fws,5544
-pip/_vendor/html5lib/treewalkers/base.py,sha256=ei-2cFbNFd0gRjyaFmxnxZGLNID4o0bHFCH9bMyZ5Bk,4939
-pip/_vendor/html5lib/treewalkers/dom.py,sha256=EHyFR8D8lYNnyDU9lx_IKigVJRyecUGua0mOi7HBukc,1413
-pip/_vendor/html5lib/treewalkers/etree.py,sha256=8jVLEY2FjgN4RFugwhAh44l9ScVYoDStQFCnlPwvafI,4684
-pip/_vendor/html5lib/treewalkers/etree_lxml.py,sha256=sY6wfRshWTllu6n48TPWpKsQRPp-0CQrT0hj_AdzHSU,6309
-pip/_vendor/html5lib/treewalkers/genshi.py,sha256=4D2PECZ5n3ZN3qu3jMl9yY7B81jnQApBQSVlfaIuYbA,2309
-pip/_vendor/lockfile/__init__.py,sha256=Tqpz90DwKYfhPsfzVOJl84TL87pdFE5ePNHdXAxs4Tk,9371
-pip/_vendor/lockfile/linklockfile.py,sha256=C7OH3H4GdK68u4FQgp8fkP2kO4fyUTSyj3X6blgfobc,2652
-pip/_vendor/lockfile/mkdirlockfile.py,sha256=e3qgIL-etZMLsS-3ft19iW_8IQ360HNkGOqE3yBKsUw,3096
-pip/_vendor/lockfile/pidlockfile.py,sha256=ukH9uk6NFuxyVmG5QiWw4iKq3fT7MjqUguX95avYPIY,6090
-pip/_vendor/lockfile/sqlitelockfile.py,sha256=o2TMkMRY0iwn-iL1XMRRIFStMUkS4i3ajceeYNntKFg,5506
-pip/_vendor/lockfile/symlinklockfile.py,sha256=ABwXXmvTHvCl5viPblShL3PG-gGsLiT1roAMfDRwhi8,2616
-pip/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720
-pip/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513
-pip/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860
-pip/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416
-pip/_vendor/packaging/markers.py,sha256=mtg2nphJE1oQO39g1DgsdPsMO-guBBClpR-AEYFrbMg,8230
-pip/_vendor/packaging/requirements.py,sha256=SD7dVJGjdPUqtoHb47qwK6wWJTQd-ZXWjxpJg83UcBA,4327
-pip/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025
-pip/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421
-pip/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556
-pip/_vendor/pkg_resources/__init__.py,sha256=CcwuHtCBZn9OTkmgF9cFpadIAMhlrnZTVKTOo4V2p58,103230
-pip/_vendor/progress/__init__.py,sha256=Wn1074LUDZovd4zfoVYojnPBgOc6ctHbQX7rp_p8lRA,3023
-pip/_vendor/progress/bar.py,sha256=YNPJeRrwYVKFO2nyaEwsQjYByamMWTgJMvQO1NpD-AY,2685
-pip/_vendor/progress/counter.py,sha256=kEqA8jWEdwrc6P_9VaRx7bjOHwk9gxl-Q9oVbQ08v5c,1502
-pip/_vendor/progress/helpers.py,sha256=FehfwZTv-5cCfsbcMlvlUkm3xZ0cRhsev6XVpmeTF4c,2854
-pip/_vendor/progress/spinner.py,sha256=iCVtUQbaJUFHTjn1ZLPQLPYeao4lC9aXAa_HxIeUK6k,1314
-pip/_vendor/requests/__init__.py,sha256=Cde-qxOWcslaEcPvKAJQPFbY8_va8PMbU7Rssr7vViI,2326
-pip/_vendor/requests/adapters.py,sha256=DJdgax91PyS2s6_oZPELbuLWNlM2xGguNu62sqcOUik,19740
-pip/_vendor/requests/api.py,sha256=PgminOpD8hLLKLNs0RWLKr1HpNc4Qxr_6uen8q2c9CI,5794
-pip/_vendor/requests/auth.py,sha256=eBLtJlcTZxRG7xKXCvGQBLO9a-PxFgMf2qTUbtZwMJM,8175
-pip/_vendor/requests/cacert.pem,sha256=5xzWFRrSP0ZsXiW6emg8UQ_w497lT4qWCv32OO8R1ME,344712
-pip/_vendor/requests/certs.py,sha256=Aa-oStu9f2lVi8VM9Aw1xaAtTIz7bhu5CGKNPEW1waM,625
-pip/_vendor/requests/compat.py,sha256=0cgWB43LEX5OrX1O4k-bPbFlIbWXgEd412DSDJtF1Y8,1687
-pip/_vendor/requests/cookies.py,sha256=awMI0hm3SKheMEDTqO8AIadc2XmnCGKPCTNw_4hlM3Q,18208
-pip/_vendor/requests/exceptions.py,sha256=x-MGvDASYKSstuCNYTA5IT_EAcxTp5knE3WPMrgkrlI,2860
-pip/_vendor/requests/hooks.py,sha256=HXAHoC1FNTFRZX6-lNdvPM7Tst4kvGwYTN-AOKRxoRU,767
-pip/_vendor/requests/models.py,sha256=YHuL2khGDFxeWc-NMJIcfFqvYJ0dKs1mXfj1Fuff1J8,30532
-pip/_vendor/requests/sessions.py,sha256=H7HpKRLKeu1MSH5W1-PI2GMCFLN4bz5i3OFqjjgzE5k,25609
-pip/_vendor/requests/status_codes.py,sha256=uwVHcMPkHV3FElDLlnDTH3KULZIAGxaovbBxrjWm8N0,3316
-pip/_vendor/requests/structures.py,sha256=yexCvWbX40M6E8mLQOpAGZZ-ZoAnyaT2dni-Bp-b42g,3012
-pip/_vendor/requests/utils.py,sha256=9d3jqnA8avsF9N1QPmsk2pJgo2pxuExrN2hoIhtLggY,24163
-pip/_vendor/requests/packages/__init__.py,sha256=CVheqNRcXIkAi5037RhxeqbAqd0QhrK1o9R9kS2xvuI,1384
-pip/_vendor/requests/packages/chardet/__init__.py,sha256=XuTKCYOR7JwsoHxqZTYH86LVyMDbDI3s1s0W_qoGEBM,1295
-pip/_vendor/requests/packages/chardet/big5freq.py,sha256=D8oTdz-GM7Jg8TsaWJDm65vM_OLHC3xub6qUJ3rOgsQ,82594
-pip/_vendor/requests/packages/chardet/big5prober.py,sha256=XX96C--6WKYW36mL-z7pJSAtc169Z8ZImByCP4pEN9A,1684
-pip/_vendor/requests/packages/chardet/chardetect.py,sha256=f4299UZG6uWd3i3r_N0OdrFj2sA9JFI54PAmDLAFmWA,2504
-pip/_vendor/requests/packages/chardet/chardistribution.py,sha256=cUARQFr1oTLXeJCDQrDRkUP778AvSMzhSCnG8VLCV58,9226
-pip/_vendor/requests/packages/chardet/charsetgroupprober.py,sha256=0lKk7VE516fgMw119tNefFqLOxKfIE9WfdkpIT69OKU,3791
-pip/_vendor/requests/packages/chardet/charsetprober.py,sha256=Z48o2KiOj23FNqYH8FqzhH5m1qdm3rI8DcTm2Yqtklg,1902
-pip/_vendor/requests/packages/chardet/codingstatemachine.py,sha256=E85rYhHVMw9xDEJVgiQhp0OnLGr6i2r8_7QOWMKTH08,2318
-pip/_vendor/requests/packages/chardet/compat.py,sha256=5mm6yrHwef1JEG5OxkPJlSq5lkjLVpEGh3iPgFBkpkM,1157
-pip/_vendor/requests/packages/chardet/constants.py,sha256=-UnY8U7EP7z9fTyd09yq35BEkSFEAUAiv9ohd1DW1s4,1335
-pip/_vendor/requests/packages/chardet/cp949prober.py,sha256=FMvdLyB7fejPXRsTbca7LK1P3RUvvssmjUNyaEfz8zY,1782
-pip/_vendor/requests/packages/chardet/escprober.py,sha256=q5TcQKeVq31WxrW7Sv8yjpZkjEoaHO8S92EJZ9hodys,3187
-pip/_vendor/requests/packages/chardet/escsm.py,sha256=7iljEKN8lXTh8JFXPUSwlibMno6R6ksq4evLxbkzfro,7839
-pip/_vendor/requests/packages/chardet/eucjpprober.py,sha256=5IpfSEjAb7h3hcGMd6dkU80O900C2N6xku28rdYFKuc,3678
-pip/_vendor/requests/packages/chardet/euckrfreq.py,sha256=T5saK5mImySG5ygQPtsp6o2uKulouCwYm2ElOyFkJqU,45978
-pip/_vendor/requests/packages/chardet/euckrprober.py,sha256=Wo7dnZ5Erw_nB4H-m5alMiOxOuJUmGHlwCSaGqExDZA,1675
-pip/_vendor/requests/packages/chardet/euctwfreq.py,sha256=G_I0BW9i1w0ONeeUwIYqV7_U09buIHdqh-wNHVaql7I,34872
-pip/_vendor/requests/packages/chardet/euctwprober.py,sha256=upS2P6GuT5ujOxXYw-RJLcT7A4PTuo27KGUKU4UZpIQ,1676
-pip/_vendor/requests/packages/chardet/gb2312freq.py,sha256=M2gFdo_qQ_BslStEchrPW5CrPEZEacC0uyDLw4ok-kY,36011
-pip/_vendor/requests/packages/chardet/gb2312prober.py,sha256=VWnjoRa83Y6V6oczMaxyUr0uy48iCnC2nzk9zfEIRHc,1681
-pip/_vendor/requests/packages/chardet/hebrewprober.py,sha256=8pdoUfsVXf_L4BnJde_BewS6H2yInV5688eu0nFhLHY,13359
-pip/_vendor/requests/packages/chardet/jisfreq.py,sha256=ZcL4R5ekHHbP2KCYGakVMBsiKqZZZAABzhwi-uRkOps,47315
-pip/_vendor/requests/packages/chardet/jpcntx.py,sha256=yftmp0QaF6RJO5SJs8I7LU5AF4rwP23ebeCQL4BM1OY,19348
-pip/_vendor/requests/packages/chardet/langbulgarianmodel.py,sha256=ZyPsA796MSVhYdfWhMCgKWckupAKAnKqWcE3Cl3ej6o,12784
-pip/_vendor/requests/packages/chardet/langcyrillicmodel.py,sha256=fkcd5OvogUp-GrNDWAZPgkYsSRCD2omotAEvqjlmLKE,17725
-pip/_vendor/requests/packages/chardet/langgreekmodel.py,sha256=QHMy31CH_ot67UCtmurCEKqKx2WwoaKrw2YCYYBK2Lw,12628
-pip/_vendor/requests/packages/chardet/langhebrewmodel.py,sha256=4ASl5vzKJPng4H278VHKtRYC03TpQpenlHTcsmZH1rE,11318
-pip/_vendor/requests/packages/chardet/langhungarianmodel.py,sha256=SXwuUzh49_cBeMXhshRHdrhlkz0T8_pZWV_pdqBKNFk,12536
-pip/_vendor/requests/packages/chardet/langthaimodel.py,sha256=-k7djh3dGKngAGnt3WfuoJN7acDcWcmHAPojhaUd7q4,11275
-pip/_vendor/requests/packages/chardet/latin1prober.py,sha256=238JHOxH8aRudJY2NmeSv5s7i0Qe3GuklIU3HlYybvg,5232
-pip/_vendor/requests/packages/chardet/mbcharsetprober.py,sha256=9rOCjDVsmSMp6e7q2syqak22j7lrbUZhJhMee2gbVL0,3268
-pip/_vendor/requests/packages/chardet/mbcsgroupprober.py,sha256=SHRzNPLpDXfMJLA8phCHVU0WgqbgDCNxDQMolGX_7yk,1967
-pip/_vendor/requests/packages/chardet/mbcssm.py,sha256=IKwJXyxu34n6NojmxVxC60MLFtJKm-hIfxaFEnb3uBA,19590
-pip/_vendor/requests/packages/chardet/sbcharsetprober.py,sha256=Xq0lODqJnDgxglBiQI4BqTFiPbn63-0a5XNA5-hVu7U,4793
-pip/_vendor/requests/packages/chardet/sbcsgroupprober.py,sha256=8hLyH8RAG-aohBo7o_KciWVgRo42ZE_zEtuNG1JMRYI,3291
-pip/_vendor/requests/packages/chardet/sjisprober.py,sha256=UYOmiMDzttYIkSDoOB08UEagivJpUXz4tuWiWzTiOr8,3764
-pip/_vendor/requests/packages/chardet/universaldetector.py,sha256=h-E2x6XSCzlNjycYWG0Fe4Cf1SGdaIzUNu2HCphpMZA,6840
-pip/_vendor/requests/packages/chardet/utf8prober.py,sha256=7tdNZGrJY7jZUBD483GGMkiP0Tx8Fp-cGvWHoAsilHg,2652
-pip/_vendor/requests/packages/urllib3/__init__.py,sha256=EF9pbHgMzqQek2Y6EZ82A8B6wETFeW7bK0K-HoZ3Ffo,2852
-pip/_vendor/requests/packages/urllib3/_collections.py,sha256=RP-cHyTx4AgYwvoETK8q1IVRbWFJnE0VV692ZHSbU68,10553
-pip/_vendor/requests/packages/urllib3/connection.py,sha256=QCmkelYgtbc06DfJtgs22na78kRTLCTbLb-OSWLbt-A,11617
-pip/_vendor/requests/packages/urllib3/connectionpool.py,sha256=fls19n1Y4jnwOBsZz_9F01i08xH2gZXEIyyDmWd-mKU,33591
-pip/_vendor/requests/packages/urllib3/exceptions.py,sha256=zGjhZCR1wefEnCN5b7WouQ3UhXesJ2bRKYIeWusaFJs,5599
-pip/_vendor/requests/packages/urllib3/fields.py,sha256=WUMvCLvnw7XemBq6AmCgNPJwyIJL_vWaMHaA2FLlscM,5931
-pip/_vendor/requests/packages/urllib3/filepost.py,sha256=NvLlFsdt8ih_Q4S2ekQF3CJG0nOXs32YI-G04_AdT2g,2320
-pip/_vendor/requests/packages/urllib3/poolmanager.py,sha256=9Uf0fUk0aR_s1auXgwceoN2gbaIQ08lrum_cGEA9-_U,13092
-pip/_vendor/requests/packages/urllib3/request.py,sha256=jET7OvA3FSjxABBRGhCyMdPvM9XuJA6df9gRhkJiJiY,5988
-pip/_vendor/requests/packages/urllib3/response.py,sha256=wxJSV_6pyh6Cgx7XFVGpNhpZCbh4eL7lCSFaU4ixXXc,18615
-pip/_vendor/requests/packages/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/_vendor/requests/packages/urllib3/contrib/appengine.py,sha256=NdN_xOgDLMadUPe_dN3wdan_DH9-fxVNqFgq19tbqQs,7937
-pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py,sha256=r-vMDMXAGbix9a7-IhbKVTATmAst-5g4hKYOLf8Kd5M,4531
-pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py,sha256=JsdAh0gL4XvQzhOEBRoFtJN91qLf1LFIDEFZs95445I,11778
-pip/_vendor/requests/packages/urllib3/contrib/socks.py,sha256=uPHtE6R8uyUbD9R8l2wO80c87WDGZ9rou3kNOwV74eA,5668
-pip/_vendor/requests/packages/urllib3/packages/__init__.py,sha256=nlChrGzkjCkmhCX9HrF_qHPUgosfsPQkVIJxiiLhk9g,109
-pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py,sha256=VQaPONfhVMsb8B63Xg7ZOydJqIE_jzeMhVN3Pec6ogw,8935
-pip/_vendor/requests/packages/urllib3/packages/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
-pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py,sha256=cOWMIn1orgJoA35p6pSzO_-Dc6iOX9Dhl6D2sL9b_2o,460
-pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py,sha256=fK28k37hL7-D79v9iM2fHgNK9Q1Pw0M7qVRL4rkfFjQ,3778
-pip/_vendor/requests/packages/urllib3/util/__init__.py,sha256=n2QE9_0Bb6u8tf7LUc4qKe8V-Hz9G8lEOc9j_30Q8d0,892
-pip/_vendor/requests/packages/urllib3/util/connection.py,sha256=7B5Mmepg5Xd399VKE__VHxD2ObapYFrB3mWJ_EnIebs,4744
-pip/_vendor/requests/packages/urllib3/util/request.py,sha256=ZMDewRK-mjlK72szGIIjzYnLIn-zPP0WgJUMjKeZ6Tg,2128
-pip/_vendor/requests/packages/urllib3/util/response.py,sha256=1UFd5TIp9MyBp4xgnZoyQZscZVPPr0tWRaXNR5w_vds,2165
-pip/_vendor/requests/packages/urllib3/util/retry.py,sha256=5eA3GHR_L14qz66NU6gr-v5VbKYsvdEqOvCcsx1oLKo,10664
-pip/_vendor/requests/packages/urllib3/util/ssl_.py,sha256=7xR_jvQLTQA1U006wJ1bl2KuLGnD1qQvUcFM2uysedw,11622
-pip/_vendor/requests/packages/urllib3/util/timeout.py,sha256=ioAIYptFyBG7eU_r8_ZmO45hpj1dJE6WCvrGR9dNFjs,9596
-pip/_vendor/requests/packages/urllib3/util/url.py,sha256=EcX4ZfmgKWcqM4sY9FlC-yN4y_snuURPV0TpUPHNjnc,5879
-pip/_vendor/webencodings/__init__.py,sha256=t7rAQQxXwalY-ak9hTl73qHjhia9UH-sL-e00qQrBpo,10576
-pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979
-pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305
-pip/_vendor/webencodings/tests.py,sha256=7vTk7LgOJn_t1XtT_viofZlEJ7cJCzPe_hvVHOkcQl8,6562
-pip/_vendor/webencodings/x_user_defined.py,sha256=72cfPRhbfkRCGkkA8ZnvVV7UnoiLb5uPMhXwhrXiLPk,4306
-pip/commands/__init__.py,sha256=2Uq3HCdjchJD9FL1LB7rd5v6UySVAVizX0W3EX3hIoE,2244
-pip/commands/check.py,sha256=-A7GI1-WZBh9a4P6UoH_aR-J7I8Lz8ly7m3wnCjmevs,1382
-pip/commands/completion.py,sha256=kkPgVX7SUcJ_8Juw5GkgWaxHN9_45wmAr9mGs1zXEEs,2453
-pip/commands/download.py,sha256=8RuuPmSYgAq3iEDTqZY_1PDXRqREdUULHNjWJeAv7Mo,7810
-pip/commands/freeze.py,sha256=h6-yFMpjCjbNj8-gOm5UuoF6cg14N5rPV4TCi3_CeuI,2835
-pip/commands/hash.py,sha256=MCt4jEFyfoce0lVeNEz1x49uaTY-VDkKiBvvxrVcHkw,1597
-pip/commands/help.py,sha256=84HWkEdnGP_AEBHnn8gJP2Te0XTXRKFoXqXopbOZTNo,982
-pip/commands/install.py,sha256=ovG9p9n1X2NPqMgFVtSuT9kMbLAdx1r3YSSiXSvgOKI,17412
-pip/commands/list.py,sha256=93bCiFyt2Qut_YHkYHJMZHpXladmxsjS-yOtZeb3uqI,11369
-pip/commands/search.py,sha256=oTs9QNdefnrmCV_JeftG0PGiMuYVmiEDF1OUaYsmDao,4502
-pip/commands/show.py,sha256=ZYM57_7U8KP9MQIIyHKQdZxmiEZByy-DRzB697VFoTY,5891
-pip/commands/uninstall.py,sha256=tz8cXz4WdpUdnt3RvpdQwH6_SNMB50egBIZWa1dwfcc,2884
-pip/commands/wheel.py,sha256=z5SEhws2YRMb0Ml1IEkg6jFZMLRpLl86bHCrQbYt5zo,7729
-pip/compat/__init__.py,sha256=2Xs_IpsmdRgHbQgQO0c8_lPvHJnQXHyGWxPbLbYJL4c,4672
-pip/compat/dictconfig.py,sha256=dRrelPDWrceDSzFT51RTEVY2GuM7UDyc5Igh_tn4Fvk,23096
-pip/models/__init__.py,sha256=0Rs7_RA4DxeOkWT5Cq4CQzDrSEhvYcN3TH2cazr72PE,71
-pip/models/index.py,sha256=pUfbO__v3mD9j-2n_ClwPS8pVyx4l2wIwyvWt8GMCRA,487
-pip/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/operations/check.py,sha256=uwUN9cs1sPo7c0Sj6pRrSv7b22Pk29SXUImTelVchMQ,1590
-pip/operations/freeze.py,sha256=k-7w7LsM-RpPv7ERBzHiPpYkH-GuYfHLyR-Cp_1VPL0,5194
-pip/req/__init__.py,sha256=vFwZY8_Vc1WU1zFAespg1My_r_AT3n7cN0W9eX0EFqk,276
-pip/req/req_file.py,sha256=fG9MDsXUNPhmGwxUiwrIXEynyD8Q7s3L47-hLZPDXq0,11926
-pip/req/req_install.py,sha256=gYrH-lwQMmt55VVbav_EtRIPu94cQbHFHm_Kq6AeHbg,46487
-pip/req/req_set.py,sha256=jHspXqcA2FxcF05dgUIAZ5huYPv6bn0wRUX0Z7PKmaA,34462
-pip/req/req_uninstall.py,sha256=fdH2VgCjEC8NRYDS7fRu3ZJaBBUEy-N5muwxDX5MBNM,6897
-pip/utils/__init__.py,sha256=HX_wYS15oiYOz-H3qG1Kbi1CY7AGWCNK5jloiD0fauc,27187
-pip/utils/appdirs.py,sha256=kj2LK-I2fC5QnEh_A_v-ev_IQMcXaWWF5DE39sNvCLQ,8811
-pip/utils/build.py,sha256=4smLRrfSCmXmjEnVnMFh2tBEpNcSLRe6J0ejZJ-wWJE,1312
-pip/utils/deprecation.py,sha256=X_FMjtDbMJqfqEkdRrki-mYyIdPB6I6DHUTCA_ChY6M,2232
-pip/utils/encoding.py,sha256=NQxGiFS5GbeAveLZTnx92t5r0PYqvt0iRnP2u9SGG1w,971
-pip/utils/filesystem.py,sha256=ZEVBuYM3fqr2_lgOESh4Y7fPFszGD474zVm_M3Mb5Tk,899
-pip/utils/glibc.py,sha256=jcQYjt_oJLPKVZB28Kauy4Sw70zS-wawxoU1HHX36_0,2939
-pip/utils/hashes.py,sha256=oMk7cd3PbJgzpSQyXq1MytMud5f6H5Oa2YY5hYuCq6I,2866
-pip/utils/logging.py,sha256=7yWu4gZw-Qclj7X80QVdpGWkdTWGKT4LiUVKcE04pro,3327
-pip/utils/outdated.py,sha256=fNwOCL5r2EftPGhgCYGMKu032HC8cV-JAr9lp0HmToM,5455
-pip/utils/packaging.py,sha256=qhmli14odw6DIhWJgQYS2Q0RrSbr8nXNcG48f5yTRms,2080
-pip/utils/setuptools_build.py,sha256=0blfscmNJW_iZ5DcswJeDB_PbtTEjfK9RL1R1WEDW2E,278
-pip/utils/ui.py,sha256=pbDkSAeumZ6jdZcOJ2yAbx8iBgeP2zfpqNnLJK1gskQ,11597
-pip/vcs/__init__.py,sha256=WafFliUTHMmsSISV8PHp1M5EXDNSWyJr78zKaQmPLdY,12374
-pip/vcs/bazaar.py,sha256=tYTwc4b4off8mr0O2o8SiGejqBDJxcbDBMSMd9-ISYc,3803
-pip/vcs/git.py,sha256=5LfWryi78A-2ULjEZJvCTarJ_3l8venwXASlwm8hiug,11197
-pip/vcs/mercurial.py,sha256=xG6rDiwHCRytJEs23SIHBXl_SwQo2jkkdD_6rVVP5h4,3472
-pip/vcs/subversion.py,sha256=GAuX2Sk7IZvJyEzENKcVld_wGBrQ3fpXDlXjapZEYdI,9350
-pip-9.0.1.dist-info/DESCRIPTION.rst,sha256=Va8Wj1XBpTbVQ2Z41mZRJdALEeziiS_ZewWn1H2ecY4,1287
-pip-9.0.1.dist-info/METADATA,sha256=mvs_tLoKAbECXY_6QHiVWQsagSL-1UjolQTpScT8JSk,2529
-pip-9.0.1.dist-info/RECORD,,
-pip-9.0.1.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
-pip-9.0.1.dist-info/entry_points.txt,sha256=GWc-Wb9WUKZ1EuVWNz-G0l3BeIpbNJLx0OJbZ61AAV0,68
-pip-9.0.1.dist-info/metadata.json,sha256=aqvkETDy4mHUBob-2Fn5WWlXORi_M2OSfQ2HQCUU_Fk,1565
-pip-9.0.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-../../Scripts/pip.exe,sha256=czaxfvd0QENGZdLE36pybZ0DlYpzjXeARPia_OT5OYE,89494
-../../Scripts/pip3.exe,sha256=czaxfvd0QENGZdLE36pybZ0DlYpzjXeARPia_OT5OYE,89494
-../../Scripts/pip3.6.exe,sha256=czaxfvd0QENGZdLE36pybZ0DlYpzjXeARPia_OT5OYE,89494
-pip-9.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-pip/commands/__pycache__/check.cpython-36.pyc,,
-pip/commands/__pycache__/completion.cpython-36.pyc,,
-pip/commands/__pycache__/download.cpython-36.pyc,,
-pip/commands/__pycache__/freeze.cpython-36.pyc,,
-pip/commands/__pycache__/hash.cpython-36.pyc,,
-pip/commands/__pycache__/help.cpython-36.pyc,,
-pip/commands/__pycache__/install.cpython-36.pyc,,
-pip/commands/__pycache__/list.cpython-36.pyc,,
-pip/commands/__pycache__/search.cpython-36.pyc,,
-pip/commands/__pycache__/show.cpython-36.pyc,,
-pip/commands/__pycache__/uninstall.cpython-36.pyc,,
-pip/commands/__pycache__/wheel.cpython-36.pyc,,
-pip/commands/__pycache__/__init__.cpython-36.pyc,,
-pip/compat/__pycache__/dictconfig.cpython-36.pyc,,
-pip/compat/__pycache__/__init__.cpython-36.pyc,,
-pip/models/__pycache__/index.cpython-36.pyc,,
-pip/models/__pycache__/__init__.cpython-36.pyc,,
-pip/operations/__pycache__/check.cpython-36.pyc,,
-pip/operations/__pycache__/freeze.cpython-36.pyc,,
-pip/operations/__pycache__/__init__.cpython-36.pyc,,
-pip/req/__pycache__/req_file.cpython-36.pyc,,
-pip/req/__pycache__/req_install.cpython-36.pyc,,
-pip/req/__pycache__/req_set.cpython-36.pyc,,
-pip/req/__pycache__/req_uninstall.cpython-36.pyc,,
-pip/req/__pycache__/__init__.cpython-36.pyc,,
-pip/utils/__pycache__/appdirs.cpython-36.pyc,,
-pip/utils/__pycache__/build.cpython-36.pyc,,
-pip/utils/__pycache__/deprecation.cpython-36.pyc,,
-pip/utils/__pycache__/encoding.cpython-36.pyc,,
-pip/utils/__pycache__/filesystem.cpython-36.pyc,,
-pip/utils/__pycache__/glibc.cpython-36.pyc,,
-pip/utils/__pycache__/hashes.cpython-36.pyc,,
-pip/utils/__pycache__/logging.cpython-36.pyc,,
-pip/utils/__pycache__/outdated.cpython-36.pyc,,
-pip/utils/__pycache__/packaging.cpython-36.pyc,,
-pip/utils/__pycache__/setuptools_build.cpython-36.pyc,,
-pip/utils/__pycache__/ui.cpython-36.pyc,,
-pip/utils/__pycache__/__init__.cpython-36.pyc,,
-pip/vcs/__pycache__/bazaar.cpython-36.pyc,,
-pip/vcs/__pycache__/git.cpython-36.pyc,,
-pip/vcs/__pycache__/mercurial.cpython-36.pyc,,
-pip/vcs/__pycache__/subversion.cpython-36.pyc,,
-pip/vcs/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-36.pyc,,
-pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-36.pyc,,
-pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/adapter.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/cache.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/compat.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/controller.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/serialize.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-36.pyc,,
-pip/_vendor/cachecontrol/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/colorama/__pycache__/ansi.cpython-36.pyc,,
-pip/_vendor/colorama/__pycache__/ansitowin32.cpython-36.pyc,,
-pip/_vendor/colorama/__pycache__/initialise.cpython-36.pyc,,
-pip/_vendor/colorama/__pycache__/win32.cpython-36.pyc,,
-pip/_vendor/colorama/__pycache__/winterm.cpython-36.pyc,,
-pip/_vendor/colorama/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/distlib/_backport/__pycache__/misc.cpython-36.pyc,,
-pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-36.pyc,,
-pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-36.pyc,,
-pip/_vendor/distlib/_backport/__pycache__/tarfile.cpython-36.pyc,,
-pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/compat.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/database.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/index.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/locators.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/manifest.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/markers.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/metadata.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/resources.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/scripts.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/util.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/version.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/wheel.cpython-36.pyc,,
-pip/_vendor/distlib/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-36.pyc,,
-pip/_vendor/html5lib/filters/__pycache__/base.cpython-36.pyc,,
-pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-36.pyc,,
-pip/_vendor/html5lib/filters/__pycache__/lint.cpython-36.pyc,,
-pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-36.pyc,,
-pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-36.pyc,,
-pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-36.pyc,,
-pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-36.pyc,,
-pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-36.pyc,,
-pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-36.pyc,,
-pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-36.pyc,,
-pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-36.pyc,,
-pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-36.pyc,,
-pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/html5lib/treewalkers/__pycache__/base.cpython-36.pyc,,
-pip/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-36.pyc,,
-pip/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-36.pyc,,
-pip/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-36.pyc,,
-pip/_vendor/html5lib/treewalkers/__pycache__/genshi.cpython-36.pyc,,
-pip/_vendor/html5lib/treewalkers/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/html5lib/_trie/__pycache__/datrie.cpython-36.pyc,,
-pip/_vendor/html5lib/_trie/__pycache__/py.cpython-36.pyc,,
-pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-36.pyc,,
-pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/html5lib/__pycache__/constants.cpython-36.pyc,,
-pip/_vendor/html5lib/__pycache__/html5parser.cpython-36.pyc,,
-pip/_vendor/html5lib/__pycache__/serializer.cpython-36.pyc,,
-pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-36.pyc,,
-pip/_vendor/html5lib/__pycache__/_inputstream.cpython-36.pyc,,
-pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-36.pyc,,
-pip/_vendor/html5lib/__pycache__/_utils.cpython-36.pyc,,
-pip/_vendor/html5lib/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/lockfile/__pycache__/linklockfile.cpython-36.pyc,,
-pip/_vendor/lockfile/__pycache__/mkdirlockfile.cpython-36.pyc,,
-pip/_vendor/lockfile/__pycache__/pidlockfile.cpython-36.pyc,,
-pip/_vendor/lockfile/__pycache__/sqlitelockfile.cpython-36.pyc,,
-pip/_vendor/lockfile/__pycache__/symlinklockfile.cpython-36.pyc,,
-pip/_vendor/lockfile/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/markers.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/requirements.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/utils.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/version.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/_compat.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/_structures.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/__about__.cpython-36.pyc,,
-pip/_vendor/packaging/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/pkg_resources/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/progress/__pycache__/bar.cpython-36.pyc,,
-pip/_vendor/progress/__pycache__/counter.cpython-36.pyc,,
-pip/_vendor/progress/__pycache__/helpers.cpython-36.pyc,,
-pip/_vendor/progress/__pycache__/spinner.cpython-36.pyc,,
-pip/_vendor/progress/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/big5freq.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/big5prober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/chardetect.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/chardistribution.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/charsetgroupprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/charsetprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/codingstatemachine.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/compat.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/constants.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/cp949prober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/escprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/escsm.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/eucjpprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/euckrfreq.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/euckrprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/euctwfreq.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/euctwprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/gb2312freq.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/gb2312prober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/hebrewprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/jisfreq.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/jpcntx.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/langbulgarianmodel.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/langcyrillicmodel.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/langgreekmodel.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/langhebrewmodel.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/langhungarianmodel.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/langthaimodel.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/latin1prober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/mbcharsetprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/mbcsgroupprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/mbcssm.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/sbcharsetprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/sbcsgroupprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/sjisprober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/universaldetector.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/utf8prober.cpython-36.pyc,,
-pip/_vendor/requests/packages/chardet/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/contrib/__pycache__/appengine.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/contrib/__pycache__/ntlmpool.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/contrib/__pycache__/pyopenssl.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/contrib/__pycache__/socks.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/contrib/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/packages/__pycache__/ordered_dict.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/packages/__pycache__/six.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/packages/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/util/__pycache__/connection.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/util/__pycache__/request.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/util/__pycache__/response.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/util/__pycache__/retry.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/util/__pycache__/ssl_.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/util/__pycache__/timeout.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/util/__pycache__/url.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/util/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/connection.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/connectionpool.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/exceptions.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/fields.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/filepost.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/poolmanager.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/request.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/response.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/_collections.cpython-36.pyc,,
-pip/_vendor/requests/packages/urllib3/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/requests/packages/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/adapters.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/api.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/auth.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/certs.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/compat.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/cookies.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/exceptions.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/hooks.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/models.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/sessions.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/status_codes.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/structures.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/utils.cpython-36.pyc,,
-pip/_vendor/requests/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/webencodings/__pycache__/labels.cpython-36.pyc,,
-pip/_vendor/webencodings/__pycache__/mklabels.cpython-36.pyc,,
-pip/_vendor/webencodings/__pycache__/tests.cpython-36.pyc,,
-pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-36.pyc,,
-pip/_vendor/webencodings/__pycache__/__init__.cpython-36.pyc,,
-pip/_vendor/__pycache__/appdirs.cpython-36.pyc,,
-pip/_vendor/__pycache__/distro.cpython-36.pyc,,
-pip/_vendor/__pycache__/ipaddress.cpython-36.pyc,,
-pip/_vendor/__pycache__/ordereddict.cpython-36.pyc,,
-pip/_vendor/__pycache__/pyparsing.cpython-36.pyc,,
-pip/_vendor/__pycache__/re-vendor.cpython-36.pyc,,
-pip/_vendor/__pycache__/retrying.cpython-36.pyc,,
-pip/_vendor/__pycache__/six.cpython-36.pyc,,
-pip/_vendor/__pycache__/__init__.cpython-36.pyc,,
-pip/__pycache__/basecommand.cpython-36.pyc,,
-pip/__pycache__/baseparser.cpython-36.pyc,,
-pip/__pycache__/cmdoptions.cpython-36.pyc,,
-pip/__pycache__/download.cpython-36.pyc,,
-pip/__pycache__/exceptions.cpython-36.pyc,,
-pip/__pycache__/index.cpython-36.pyc,,
-pip/__pycache__/locations.cpython-36.pyc,,
-pip/__pycache__/pep425tags.cpython-36.pyc,,
-pip/__pycache__/status_codes.cpython-36.pyc,,
-pip/__pycache__/wheel.cpython-36.pyc,,
-pip/__pycache__/__init__.cpython-36.pyc,,
-pip/__pycache__/__main__.cpython-36.pyc,,
diff --git a/env/Lib/site-packages/pip-9.0.1.dist-info/WHEEL b/env/Lib/site-packages/pip-9.0.1.dist-info/WHEEL
deleted file mode 100644
index 8b6dd1b..0000000
--- a/env/Lib/site-packages/pip-9.0.1.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.29.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
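
The WHEEL file removed above is RFC 822-style key/value metadata; the two Tag lines mark the wheel as universal (pure Python, py2 and py3). A minimal sketch of reading such a file with only the standard library, assuming a local file named WHEEL:

    # Parse a WHEEL metadata file (RFC 822 key/value format).
    from email.parser import Parser

    with open("WHEEL") as f:
        meta = Parser().parse(f)

    print(meta["Wheel-Version"])    # "1.0"
    print(meta["Root-Is-Purelib"])  # "true": install into purelib
    print(meta.get_all("Tag"))      # ["py2-none-any", "py3-none-any"]
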
diff --git a/env/Lib/site-packages/pip-9.0.1.dist-info/entry_points.txt b/env/Lib/site-packages/pip-9.0.1.dist-info/entry_points.txt
deleted file mode 100644
index c02a8d5..0000000
--- a/env/Lib/site-packages/pip-9.0.1.dist-info/entry_points.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-[console_scripts]
-pip = pip:main
-pip3 = pip:main
-pip3.5 = pip:main
-
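
entry_points.txt uses INI syntax: each key under [console_scripts] names a wrapper script and each value is a module:function target; here pip, pip3 and pip3.5 all point at pip:main. A minimal sketch of reading it, assuming the file is on disk:

    # List the console scripts declared in an entry_points.txt file.
    from configparser import ConfigParser

    cp = ConfigParser()
    cp.read("entry_points.txt")  # assumed local path
    for name, target in cp["console_scripts"].items():
        module, _, func = target.partition(":")
        print("%s -> %s() in %s" % (name, func, module))
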
diff --git a/env/Lib/site-packages/pip-9.0.1.dist-info/metadata.json b/env/Lib/site-packages/pip-9.0.1.dist-info/metadata.json
deleted file mode 100644
index 9eae02c..0000000
--- a/env/Lib/site-packages/pip-9.0.1.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Build Tools", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: Implementation :: PyPy"], "extensions": {"python.commands": {"wrap_console": {"pip": "pip:main", "pip3": "pip:main", "pip3.5": "pip:main"}}, "python.details": {"contacts": [{"email": "python-virtualenv@groups.google.com", "name": "The pip developers", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://pip.pypa.io/"}}, "python.exports": {"console_scripts": {"pip": "pip:main", "pip3": "pip:main", "pip3.5": "pip:main"}}}, "extras": ["testing"], "generator": "bdist_wheel (0.29.0)", "keywords": ["easy_install", "distutils", "setuptools", "egg", "virtualenv"], "license": "MIT", "metadata_version": "2.0", "name": "pip", "requires_python": ">=2.6,!=3.0.*,!=3.1.*,!=3.2.*", "run_requires": [{"extra": "testing", "requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "summary": "The PyPA recommended tool for installing Python packages.", "test_requires": [{"requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "version": "9.0.1"}
\ No newline at end of file
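
The wrap_console mapping in metadata.json pairs each script name with a module:attribute spec such as pip:main; installer-generated wrappers import the module and call the attribute. A hedged sketch of that resolution step (resolve() is illustrative, not an API from pip or setuptools):

    # Resolve a "module:attribute" entry-point spec into a callable.
    import importlib

    def resolve(spec):
        module_name, _, attr = spec.partition(":")
        module = importlib.import_module(module_name)
        return getattr(module, attr)

    # resolve("pip:main") would return pip.main; calling it runs the CLI.
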
diff --git a/env/Lib/site-packages/pip-9.0.1.dist-info/top_level.txt b/env/Lib/site-packages/pip-9.0.1.dist-info/top_level.txt
deleted file mode 100644
index a1b589e..0000000
--- a/env/Lib/site-packages/pip-9.0.1.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/env/Lib/site-packages/pip/__init__.py b/env/Lib/site-packages/pip/__init__.py
deleted file mode 100644
index 9c1d8f9..0000000
--- a/env/Lib/site-packages/pip/__init__.py
+++ /dev/null
@@ -1,331 +0,0 @@
-#!/usr/bin/env python
-from __future__ import absolute_import
-
-import locale
-import logging
-import os
-import optparse
-import warnings
-
-import sys
-import re
-
-# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
-# but if invoked (i.e. imported), it will issue a warning to stderr if socks
-# isn't available. requests unconditionally imports urllib3's socks contrib
-# module, triggering this warning. The warning breaks DEP-8 tests (because of
-# the stderr output) and is just plain annoying in normal usage. I don't want
-# to add socks as yet another dependency for pip, nor do I want to allow-stderr
-# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
-# be done before the import of pip.vcs.
-from pip._vendor.requests.packages.urllib3.exceptions import DependencyWarning
-warnings.filterwarnings("ignore", category=DependencyWarning) # noqa
-
-
-from pip.exceptions import InstallationError, CommandError, PipError
-from pip.utils import get_installed_distributions, get_prog
-from pip.utils import deprecation, dist_is_editable
-from pip.vcs import git, mercurial, subversion, bazaar # noqa
-from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
-from pip.commands import get_summaries, get_similar_commands
-from pip.commands import commands_dict
-from pip._vendor.requests.packages.urllib3.exceptions import (
- InsecureRequestWarning,
-)
-
-
-# assignment for flake8 to be happy
-
-# This fixes a peculiarity when importing via __import__ - as we are
-# initialising the pip module, "from pip import cmdoptions" is recursive
-# and appears not to work properly in that situation.
-import pip.cmdoptions
-cmdoptions = pip.cmdoptions
-
-# The version as used in the setup.py and the docs conf.py
-__version__ = "9.0.1"
-
-
-logger = logging.getLogger(__name__)
-
-# Hide the InsecureRequestWarning from urllib3
-warnings.filterwarnings("ignore", category=InsecureRequestWarning)
-
-
-def autocomplete():
- """Command and option completion for the main option parser (and options)
- and its subcommands (and options).
-
- Enable by sourcing one of the completion shell scripts (bash, zsh or fish).
- """
- # Don't complete if user hasn't sourced bash_completion file.
- if 'PIP_AUTO_COMPLETE' not in os.environ:
- return
- cwords = os.environ['COMP_WORDS'].split()[1:]
- cword = int(os.environ['COMP_CWORD'])
- try:
- current = cwords[cword - 1]
- except IndexError:
- current = ''
-
- subcommands = [cmd for cmd, summary in get_summaries()]
- options = []
- # subcommand
- try:
- subcommand_name = [w for w in cwords if w in subcommands][0]
- except IndexError:
- subcommand_name = None
-
- parser = create_main_parser()
- # subcommand options
- if subcommand_name:
- # special case: 'help' subcommand has no options
- if subcommand_name == 'help':
- sys.exit(1)
- # special case: list locally installed dists for uninstall command
- if subcommand_name == 'uninstall' and not current.startswith('-'):
- installed = []
- lc = current.lower()
- for dist in get_installed_distributions(local_only=True):
- if dist.key.startswith(lc) and dist.key not in cwords[1:]:
- installed.append(dist.key)
- # if there are no dists installed, fall back to option completion
- if installed:
- for dist in installed:
- print(dist)
- sys.exit(1)
-
- subcommand = commands_dict[subcommand_name]()
- options += [(opt.get_opt_string(), opt.nargs)
- for opt in subcommand.parser.option_list_all
- if opt.help != optparse.SUPPRESS_HELP]
-
- # filter out previously specified options from available options
- prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
- options = [(x, v) for (x, v) in options if x not in prev_opts]
- # filter options by current input
- options = [(k, v) for k, v in options if k.startswith(current)]
- for option in options:
- opt_label = option[0]
- # append '=' to options which require args
- if option[1]:
- opt_label += '='
- print(opt_label)
- else:
- # show main parser options only when necessary
- if current.startswith('-') or current.startswith('--'):
- opts = [i.option_list for i in parser.option_groups]
- opts.append(parser.option_list)
- opts = (o for it in opts for o in it)
-
- subcommands += [i.get_opt_string() for i in opts
- if i.help != optparse.SUPPRESS_HELP]
-
- print(' '.join([x for x in subcommands if x.startswith(current)]))
- sys.exit(1)
-
-
-def create_main_parser():
- parser_kw = {
- 'usage': '\n%prog [options]',
- 'add_help_option': False,
- 'formatter': UpdatingDefaultsHelpFormatter(),
- 'name': 'global',
- 'prog': get_prog(),
- }
-
- parser = ConfigOptionParser(**parser_kw)
- parser.disable_interspersed_args()
-
- pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- parser.version = 'pip %s from %s (python %s)' % (
- __version__, pip_pkg_dir, sys.version[:3])
-
- # add the general options
- gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
- parser.add_option_group(gen_opts)
-
- parser.main = True # so the help formatter knows
-
- # create command listing for description
- command_summaries = get_summaries()
- description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
- parser.description = '\n'.join(description)
-
- return parser
-
-
-def parseopts(args):
- parser = create_main_parser()
-
- # Note: parser calls disable_interspersed_args(), so the result of this
- # call is to split the initial args into the general options before the
- # subcommand and everything else.
- # For example:
- # args: ['--timeout=5', 'install', '--user', 'INITools']
-# general_options: ['--timeout=5']
- # args_else: ['install', '--user', 'INITools']
- general_options, args_else = parser.parse_args(args)
-
- # --version
- if general_options.version:
- sys.stdout.write(parser.version)
- sys.stdout.write(os.linesep)
- sys.exit()
-
- # pip || pip help -> print_help()
- if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
- parser.print_help()
- sys.exit()
-
- # the subcommand name
- cmd_name = args_else[0]
-
- if cmd_name not in commands_dict:
- guess = get_similar_commands(cmd_name)
-
- msg = ['unknown command "%s"' % cmd_name]
- if guess:
- msg.append('maybe you meant "%s"' % guess)
-
- raise CommandError(' - '.join(msg))
-
- # all the args without the subcommand
- cmd_args = args[:]
- cmd_args.remove(cmd_name)
-
- return cmd_name, cmd_args
-
-
-def check_isolated(args):
- isolated = False
-
- if "--isolated" in args:
- isolated = True
-
- return isolated
-
-
-def main(args=None):
- if args is None:
- args = sys.argv[1:]
-
- # Configure our deprecation warnings to be sent through loggers
- deprecation.install_warning_logger()
-
- autocomplete()
-
- try:
- cmd_name, cmd_args = parseopts(args)
- except PipError as exc:
- sys.stderr.write("ERROR: %s" % exc)
- sys.stderr.write(os.linesep)
- sys.exit(1)
-
- # Needed for locale.getpreferredencoding(False) to work
- # in pip.utils.encoding.auto_decode
- try:
- locale.setlocale(locale.LC_ALL, '')
- except locale.Error as e:
- # setlocale can apparently crash if locales are uninitialized
- logger.debug("Ignoring error %s when setting locale", e)
- command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
- return command.main(cmd_args)
-
-
-# ###########################################################
-# # Writing freeze files
-
-class FrozenRequirement(object):
-
- def __init__(self, name, req, editable, comments=()):
- self.name = name
- self.req = req
- self.editable = editable
- self.comments = comments
-
- _rev_re = re.compile(r'-r(\d+)$')
- _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
-
- @classmethod
- def from_dist(cls, dist, dependency_links):
- location = os.path.normcase(os.path.abspath(dist.location))
- comments = []
- from pip.vcs import vcs, get_src_requirement
- if dist_is_editable(dist) and vcs.get_backend_name(location):
- editable = True
- try:
- req = get_src_requirement(dist, location)
- except InstallationError as exc:
- logger.warning(
- "Error when trying to get requirement for VCS system %s, "
- "falling back to uneditable format", exc
- )
- req = None
- if req is None:
- logger.warning(
- 'Could not determine repository location of %s', location
- )
- comments.append(
- '## !! Could not determine repository location'
- )
- req = dist.as_requirement()
- editable = False
- else:
- editable = False
- req = dist.as_requirement()
- specs = req.specs
- assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
- 'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
- (specs, dist)
- version = specs[0][1]
- ver_match = cls._rev_re.search(version)
- date_match = cls._date_re.search(version)
- if ver_match or date_match:
- svn_backend = vcs.get_backend('svn')
- if svn_backend:
- svn_location = svn_backend().get_location(
- dist,
- dependency_links,
- )
- if not svn_location:
- logger.warning(
- 'Warning: cannot find svn location for %s', req)
- comments.append(
- '## FIXME: could not find svn URL in dependency_links '
- 'for this package:'
- )
- else:
- comments.append(
- '# Installing as editable to satisfy requirement %s:' %
- req
- )
- if ver_match:
- rev = ver_match.group(1)
- else:
- rev = '{%s}' % date_match.group(1)
- editable = True
- req = '%s@%s#egg=%s' % (
- svn_location,
- rev,
- cls.egg_name(dist)
- )
- return cls(dist.project_name, req, editable, comments)
-
- @staticmethod
- def egg_name(dist):
- name = dist.egg_name()
- match = re.search(r'-py\d\.\d$', name)
- if match:
- name = name[:match.start()]
- return name
-
- def __str__(self):
- req = self.req
- if self.editable:
- req = '-e %s' % req
- return '\n'.join(list(self.comments) + [str(req)]) + '\n'
-
-
-if __name__ == '__main__':
- sys.exit(main())
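
autocomplete() above implements the usual shell-completion handshake: the sourced completion script re-invokes pip with PIP_AUTO_COMPLETE set, passes the command line through COMP_WORDS and COMP_CWORD, and reads the candidates from stdout. A sketch of driving that path by hand (illustrative; assumes pip is on PATH and Python 3.7+ for capture_output):

    # Fake the environment a completion script would set up and collect
    # the completion candidates pip prints to stdout.
    import os
    import subprocess

    env = dict(
        os.environ,
        PIP_AUTO_COMPLETE="1",  # gate checked at the top of autocomplete()
        COMP_WORDS="pip ins",   # the command line being completed
        COMP_CWORD="1",         # index of the word under the cursor
    )
    result = subprocess.run(["pip"], env=env, capture_output=True, text=True)
    print(result.stdout)        # e.g. "install" on pip 9.x
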
diff --git a/env/Lib/site-packages/pip/__main__.py b/env/Lib/site-packages/pip/__main__.py
deleted file mode 100644
index 5556539..0000000
--- a/env/Lib/site-packages/pip/__main__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from __future__ import absolute_import
-
-import os
-import sys
-
-# If we are running from a wheel, add the wheel to sys.path
-# This allows the usage python pip-*.whl/pip install pip-*.whl
-if __package__ == '':
- # __file__ is pip-*.whl/pip/__main__.py
- # first dirname call strips off '/__main__.py', second strips off '/pip'
- # Resulting path is the name of the wheel itself
- # Add that to sys.path so we can import pip
- path = os.path.dirname(os.path.dirname(__file__))
- sys.path.insert(0, path)
-
-import pip # noqa
-
-if __name__ == '__main__':
- sys.exit(pip.main())
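
The shim above is what makes `python pip-*.whl/pip install ...` work: a wheel is a zip archive, so placing the .whl itself on sys.path lets zipimport load the pure-Python pip package stored inside it. The same mechanism in isolation (the wheel filename is an assumption):

    # Import a pure-Python package directly out of its wheel via zipimport.
    import sys

    sys.path.insert(0, "pip-9.0.1-py2.py3-none-any.whl")  # assumed filename
    import pip  # found as pip/__init__.py inside the archive

    print(pip.__version__)  # "9.0.1"
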
diff --git a/env/Lib/site-packages/pip/_vendor/__init__.py b/env/Lib/site-packages/pip/_vendor/__init__.py
deleted file mode 100644
index bee5f5e..0000000
--- a/env/Lib/site-packages/pip/_vendor/__init__.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-pip._vendor is for vendoring dependencies of pip to prevent needing pip to
-depend on something external.
-
-Files inside of pip._vendor should be considered immutable and should only be
-updated to versions from upstream.
-"""
-from __future__ import absolute_import
-
-import glob
-import os.path
-import sys
-
-# Downstream redistributors which have debundled our dependencies should also
-# patch this value to be true. This will trigger the additional patching
-# to cause things like "six" to be available as pip._vendor.six.
-DEBUNDLED = False
-
-# By default, look in this directory for a bunch of .whl files which we will
-# add to the beginning of sys.path before attempting to import anything. This
-# is done to support downstream re-distributors like Debian and Fedora who
-# wish to create their own Wheels for our dependencies to aid in debundling.
-WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
-
-
-# Define a small helper function to alias our vendored modules to the real ones
-# if the vendored ones do not exist. The idea for this was taken from
-# https://github.com/kennethreitz/requests/pull/2567.
-def vendored(modulename):
- vendored_name = "{0}.{1}".format(__name__, modulename)
-
- try:
- __import__(vendored_name, globals(), locals(), level=0)
- except ImportError:
- try:
- __import__(modulename, globals(), locals(), level=0)
- except ImportError:
- # We can just silently allow import failures to pass here. If we
- # got to this point it means that ``import pip._vendor.whatever``
- # failed and so did ``import whatever``. Since we're importing this
- # upfront in an attempt to alias imports, not erroring here will
- # just mean we get a regular import error whenever pip *actually*
- # tries to import one of these modules to use it, which actually
- # gives us a better error message than we would have otherwise
- # gotten.
- pass
- else:
- sys.modules[vendored_name] = sys.modules[modulename]
- base, head = vendored_name.rsplit(".", 1)
- setattr(sys.modules[base], head, sys.modules[modulename])
-
-
-# If we're operating in a debundled setup, then we want to go ahead and trigger
-# the aliasing of our vendored libraries as well as looking for wheels to add
-# to our sys.path. This will typically cause all of this code to be a no-op;
-# however, downstream redistributors can enable it in a consistent way across
-# all platforms.
-if DEBUNDLED:
- # Actually look inside of WHEEL_DIR to find .whl files and add them to the
- # front of our sys.path.
- sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
-
- # Actually alias all of our vendored dependencies.
- vendored("cachecontrol")
- vendored("colorama")
- vendored("distlib")
- vendored("distro")
- vendored("html5lib")
- vendored("lockfile")
- vendored("six")
- vendored("six.moves")
- vendored("six.moves.urllib")
- vendored("packaging")
- vendored("packaging.version")
- vendored("packaging.specifiers")
- vendored("pkg_resources")
- vendored("progress")
- vendored("retrying")
- vendored("requests")
- vendored("requests.packages")
- vendored("requests.packages.urllib3")
- vendored("requests.packages.urllib3._collections")
- vendored("requests.packages.urllib3.connection")
- vendored("requests.packages.urllib3.connectionpool")
- vendored("requests.packages.urllib3.contrib")
- vendored("requests.packages.urllib3.contrib.ntlmpool")
- vendored("requests.packages.urllib3.contrib.pyopenssl")
- vendored("requests.packages.urllib3.exceptions")
- vendored("requests.packages.urllib3.fields")
- vendored("requests.packages.urllib3.filepost")
- vendored("requests.packages.urllib3.packages")
- vendored("requests.packages.urllib3.packages.ordered_dict")
- vendored("requests.packages.urllib3.packages.six")
- vendored("requests.packages.urllib3.packages.ssl_match_hostname")
- vendored("requests.packages.urllib3.packages.ssl_match_hostname."
- "_implementation")
- vendored("requests.packages.urllib3.poolmanager")
- vendored("requests.packages.urllib3.request")
- vendored("requests.packages.urllib3.response")
- vendored("requests.packages.urllib3.util")
- vendored("requests.packages.urllib3.util.connection")
- vendored("requests.packages.urllib3.util.request")
- vendored("requests.packages.urllib3.util.response")
- vendored("requests.packages.urllib3.util.retry")
- vendored("requests.packages.urllib3.util.ssl_")
- vendored("requests.packages.urllib3.util.timeout")
- vendored("requests.packages.urllib3.util.url")
diff --git a/env/Lib/site-packages/pip/_vendor/appdirs.py b/env/Lib/site-packages/pip/_vendor/appdirs.py
deleted file mode 100644
index 4b5c38b..0000000
--- a/env/Lib/site-packages/pip/_vendor/appdirs.py
+++ /dev/null
@@ -1,552 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2005-2010 ActiveState Software Inc.
-# Copyright (c) 2013 Eddy Petrișor
-
-"""Utilities for determining application-specific dirs.
-
-See <http://github.com/ActiveState/appdirs> for details and usage.
-"""
-# Dev Notes:
-# - MSDN on where to store app data files:
-# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
-# - macOS: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
-# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
-
-__version_info__ = (1, 4, 0)
-__version__ = '.'.join(map(str, __version_info__))
-
-
-import sys
-import os
-
-PY3 = sys.version_info[0] == 3
-
-if PY3:
- unicode = str
-
-if sys.platform.startswith('java'):
- import platform
- os_name = platform.java_ver()[3][0]
- if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
- system = 'win32'
- elif os_name.startswith('Mac'): # "macOS", etc.
- system = 'darwin'
- else: # "Linux", "SunOS", "FreeBSD", etc.
- # Setting this to "linux2" is not ideal, but only Windows or Mac
- # are actually checked for and the rest of the module expects
- # *sys.platform* style strings.
- system = 'linux2'
-else:
- system = sys.platform
-
-
-
-def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific data dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
- <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
- for a discussion of issues.
-
- Typical user data directories are:
- macOS: ~/Library/Application Support/<AppName>
- Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
- Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
- Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
- Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
- Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
-
- For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
- That means, by default "~/.local/share/<AppName>".
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
- path = os.path.normpath(_get_win_folder(const))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- elif system == 'darwin':
- path = os.path.expanduser('~/Library/Application Support/')
- if appname:
- path = os.path.join(path, appname)
- else:
- path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
- """Return full path to the user-shared data dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "multipath" is an optional parameter only applicable to *nix
- which indicates that the entire list of data dirs should be
- returned. By default, the first item from XDG_DATA_DIRS is
- returned, or '/usr/local/share/<AppName>',
- if XDG_DATA_DIRS is not set
-
- Typical site data directories are:
- macOS: /Library/Application Support/<AppName>
- Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
- Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
- Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
- Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
-
- For Unix, this is using the $XDG_DATA_DIRS[0] default.
-
- WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- elif system == 'darwin':
- path = os.path.expanduser('/Library/Application Support')
- if appname:
- path = os.path.join(path, appname)
- else:
- # XDG default for $XDG_DATA_DIRS
- # only first, if multipath is False
- path = os.getenv('XDG_DATA_DIRS',
- os.pathsep.join(['/usr/local/share', '/usr/share']))
- pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
- if appname:
- if version:
- appname = os.path.join(appname, version)
- pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
- if multipath:
- path = os.pathsep.join(pathlist)
- else:
- path = pathlist[0]
- return path
-
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
- r"""Return full path to the user-specific config dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "roaming" (boolean, default False) can be set True to use the Windows
- roaming appdata directory. That means that for users on a Windows
- network setup for roaming profiles, this user data will be
- sync'd on login. See
- <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
- for a discussion of issues.
-
- Typical user config directories are:
- macOS: same as user_data_dir
- Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
- Win *: same as user_data_dir
-
- For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
- That means, by default "~/.config/<AppName>".
- """
- if system in ["win32", "darwin"]:
- path = user_data_dir(appname, appauthor, None, roaming)
- else:
- path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
- """Return full path to the user-shared data dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "multipath" is an optional parameter only applicable to *nix
- which indicates that the entire list of config dirs should be
- returned. By default, the first item from XDG_CONFIG_DIRS is
- returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
-
- Typical site config directories are:
- macOS: same as site_data_dir
- Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
- $XDG_CONFIG_DIRS
- Win *: same as site_data_dir
- Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
-
- For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
-
- WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
- """
- if system in ["win32", "darwin"]:
- path = site_data_dir(appname, appauthor)
- if appname and version:
- path = os.path.join(path, version)
- else:
- # XDG default for $XDG_CONFIG_DIRS
- # only first, if multipath is False
- path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
- pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
- if appname:
- if version:
- appname = os.path.join(appname, version)
- pathlist = [os.sep.join([x, appname]) for x in pathlist]
-
- if multipath:
- path = os.pathsep.join(pathlist)
- else:
- path = pathlist[0]
- return path
-
-
-def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
- r"""Return full path to the user-specific cache dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "opinion" (boolean) can be False to disable the appending of
- "Cache" to the base app data dir for Windows. See
- discussion below.
-
- Typical user cache directories are:
- macOS: ~/Library/Caches/<AppName>
- Unix: ~/.cache/<AppName> (XDG default)
- Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
- Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
-
- On Windows the only suggestion in the MSDN docs is that local settings go in
- the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
- app data dir (the default returned by `user_data_dir` above). Apps typically
- put cache data somewhere *under* the given dir here. Some examples:
- ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
- ...\Acme\SuperApp\Cache\1.0
- OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
- This can be disabled with the `opinion=False` option.
- """
- if system == "win32":
- if appauthor is None:
- appauthor = appname
- path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
- if appname:
- if appauthor is not False:
- path = os.path.join(path, appauthor, appname)
- else:
- path = os.path.join(path, appname)
- if opinion:
- path = os.path.join(path, "Cache")
- elif system == 'darwin':
- path = os.path.expanduser('~/Library/Caches')
- if appname:
- path = os.path.join(path, appname)
- else:
- path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
- if appname:
- path = os.path.join(path, appname)
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
- r"""Return full path to the user-specific log dir for this application.
-
- "appname" is the name of application.
- If None, just the system directory is returned.
- "appauthor" (only used on Windows) is the name of the
- appauthor or distributing body for this application. Typically
- it is the owning company name. This falls back to appname. You may
- pass False to disable it.
- "version" is an optional version path element to append to the
- path. You might want to use this if you want multiple versions
- of your app to be able to run independently. If used, this
- would typically be "<major>.<minor>".
- Only applied when appname is present.
- "opinion" (boolean) can be False to disable the appending of
- "Logs" to the base app data dir for Windows, and "log" to the
- base cache dir for Unix. See discussion below.
-
- Typical user log directories are:
- macOS: ~/Library/Logs/<AppName>
- Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
- Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
- Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
-
- On Windows the only suggestion in the MSDN docs is that local settings
- go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
- examples of what some windows apps use for a logs dir.)
-
- OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
- value for Windows and appends "log" to the user cache dir for Unix.
- This can be disabled with the `opinion=False` option.
- """
- if system == "darwin":
- path = os.path.join(
- os.path.expanduser('~/Library/Logs'),
- appname)
- elif system == "win32":
- path = user_data_dir(appname, appauthor, version)
- version = False
- if opinion:
- path = os.path.join(path, "Logs")
- else:
- path = user_cache_dir(appname, appauthor, version)
- version = False
- if opinion:
- path = os.path.join(path, "log")
- if appname and version:
- path = os.path.join(path, version)
- return path
-
-
-class AppDirs(object):
- """Convenience wrapper for getting application dirs."""
- def __init__(self, appname, appauthor=None, version=None, roaming=False,
- multipath=False):
- self.appname = appname
- self.appauthor = appauthor
- self.version = version
- self.roaming = roaming
- self.multipath = multipath
-
- @property
- def user_data_dir(self):
- return user_data_dir(self.appname, self.appauthor,
- version=self.version, roaming=self.roaming)
-
- @property
- def site_data_dir(self):
- return site_data_dir(self.appname, self.appauthor,
- version=self.version, multipath=self.multipath)
-
- @property
- def user_config_dir(self):
- return user_config_dir(self.appname, self.appauthor,
- version=self.version, roaming=self.roaming)
-
- @property
- def site_config_dir(self):
- return site_config_dir(self.appname, self.appauthor,
- version=self.version, multipath=self.multipath)
-
- @property
- def user_cache_dir(self):
- return user_cache_dir(self.appname, self.appauthor,
- version=self.version)
-
- @property
- def user_log_dir(self):
- return user_log_dir(self.appname, self.appauthor,
- version=self.version)
-
-
-#---- internal support stuff
-
-def _get_win_folder_from_registry(csidl_name):
- """This is a fallback technique at best. I'm not sure if using the
- registry for this guarantees us the correct answer for all CSIDL_*
- names.
- """
- import _winreg
-
- shell_folder_name = {
- "CSIDL_APPDATA": "AppData",
- "CSIDL_COMMON_APPDATA": "Common AppData",
- "CSIDL_LOCAL_APPDATA": "Local AppData",
- }[csidl_name]
-
- key = _winreg.OpenKey(
- _winreg.HKEY_CURRENT_USER,
- r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
- )
- dir, type = _winreg.QueryValueEx(key, shell_folder_name)
- return dir
-
-
-def _get_win_folder_with_pywin32(csidl_name):
- from win32com.shell import shellcon, shell
- dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
- # Try to make this a unicode path because SHGetFolderPath does
- # not return unicode strings when there is unicode data in the
- # path.
- try:
- dir = unicode(dir)
-
- # Downgrade to short path name if have highbit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in dir:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- try:
- import win32api
- dir = win32api.GetShortPathName(dir)
- except ImportError:
- pass
- except UnicodeError:
- pass
- return dir
-
-
-def _get_win_folder_with_ctypes(csidl_name):
- import ctypes
-
- csidl_const = {
- "CSIDL_APPDATA": 26,
- "CSIDL_COMMON_APPDATA": 35,
- "CSIDL_LOCAL_APPDATA": 28,
- }[csidl_name]
-
- buf = ctypes.create_unicode_buffer(1024)
- ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
- # Downgrade to short path name if have highbit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in buf:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- buf2 = ctypes.create_unicode_buffer(1024)
- if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
- buf = buf2
-
- return buf.value
-
-def _get_win_folder_with_jna(csidl_name):
- import array
- from com.sun import jna
- from com.sun.jna.platform import win32
-
- buf_size = win32.WinDef.MAX_PATH * 2
- buf = array.zeros('c', buf_size)
- shell = win32.Shell32.INSTANCE
- shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
- dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
- # Downgrade to short path name if have highbit chars. See
- # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
- has_high_char = False
- for c in dir:
- if ord(c) > 255:
- has_high_char = True
- break
- if has_high_char:
- buf = array.zeros('c', buf_size)
- kernel = win32.Kernel32.INSTANCE
- if kernel.GetShortPathName(dir, buf, buf_size):
- dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
- return dir
-
-if system == "win32":
- try:
- import win32com.shell
- _get_win_folder = _get_win_folder_with_pywin32
- except ImportError:
- try:
- from ctypes import windll
- _get_win_folder = _get_win_folder_with_ctypes
- except ImportError:
- try:
- import com.sun.jna
- _get_win_folder = _get_win_folder_with_jna
- except ImportError:
- _get_win_folder = _get_win_folder_from_registry
-
-
-#---- self test code
-
-if __name__ == "__main__":
- appname = "MyApp"
- appauthor = "MyCompany"
-
- props = ("user_data_dir", "site_data_dir",
- "user_config_dir", "site_config_dir",
- "user_cache_dir", "user_log_dir")
-
- print("-- app dirs (with optional 'version')")
- dirs = AppDirs(appname, appauthor, version="1.0")
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (without optional 'version')")
- dirs = AppDirs(appname, appauthor)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (without optional 'appauthor')")
- dirs = AppDirs(appname)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
-
- print("\n-- app dirs (with disabled 'appauthor')")
- dirs = AppDirs(appname, appauthor=False)
- for prop in props:
- print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py
deleted file mode 100644
index ec9da2e..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-"""CacheControl import Interface.
-
-Make it easy to import from cachecontrol without long namespaces.
-"""
-__author__ = 'Eric Larson'
-__email__ = 'eric@ionrock.org'
-__version__ = '0.11.7'
-
-from .wrapper import CacheControl
-from .adapter import CacheControlAdapter
-from .controller import CacheController
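
The imports above are the whole public surface in practice; a short sketch of wrapping a requests session (requests is importable from pip's vendored tree, and the URL is illustrative):

    from pip._vendor import requests
    from pip._vendor.cachecontrol import CacheControl

    sess = CacheControl(requests.Session())
    resp = sess.get("https://example.com/")  # a repeat GET may be served from cache
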
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
deleted file mode 100644
index afdcc88..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import logging
-
-from pip._vendor import requests
-
-from pip._vendor.cachecontrol.adapter import CacheControlAdapter
-from pip._vendor.cachecontrol.cache import DictCache
-from pip._vendor.cachecontrol.controller import logger
-
-from argparse import ArgumentParser
-
-
-def setup_logging():
- logger.setLevel(logging.DEBUG)
- handler = logging.StreamHandler()
- logger.addHandler(handler)
-
-
-def get_session():
- adapter = CacheControlAdapter(
- DictCache(),
- cache_etags=True,
- serializer=None,
- heuristic=None,
- )
- sess = requests.Session()
- sess.mount('http://', adapter)
- sess.mount('https://', adapter)
-
- sess.cache_controller = adapter.controller
- return sess
-
-
-def get_args():
- parser = ArgumentParser()
- parser.add_argument('url', help='The URL to try and cache')
- return parser.parse_args()
-
-
-def main(args=None):
- args = get_args()
- sess = get_session()
-
- # Make a request to get a response
- resp = sess.get(args.url)
-
- # Turn on logging
- setup_logging()
-
- # try setting the cache
- sess.cache_controller.cache_response(resp.request, resp.raw)
-
- # Now try to get it
- if sess.cache_controller.cached_request(resp.request):
- print('Cached!')
- else:
- print('Not cached :(')
-
-
-if __name__ == '__main__':
- main()
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
deleted file mode 100644
index 2348856..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import types
-import functools
-
-from pip._vendor.requests.adapters import HTTPAdapter
-
-from .controller import CacheController
-from .cache import DictCache
-from .filewrapper import CallbackFileWrapper
-
-
-class CacheControlAdapter(HTTPAdapter):
- invalidating_methods = set(['PUT', 'DELETE'])
-
- def __init__(self, cache=None,
- cache_etags=True,
- controller_class=None,
- serializer=None,
- heuristic=None,
- *args, **kw):
- super(CacheControlAdapter, self).__init__(*args, **kw)
- self.cache = cache or DictCache()
- self.heuristic = heuristic
-
- controller_factory = controller_class or CacheController
- self.controller = controller_factory(
- self.cache,
- cache_etags=cache_etags,
- serializer=serializer,
- )
-
- def send(self, request, **kw):
- """
- Send a request. Use the request information to see if it
- exists in the cache and cache the response if we need to and can.
- """
- if request.method == 'GET':
- cached_response = self.controller.cached_request(request)
- if cached_response:
- return self.build_response(request, cached_response,
- from_cache=True)
-
- # check for etags and add headers if appropriate
- request.headers.update(
- self.controller.conditional_headers(request)
- )
-
- resp = super(CacheControlAdapter, self).send(request, **kw)
-
- return resp
-
- def build_response(self, request, response, from_cache=False):
- """
- Build a response by making a request or using the cache.
-
- This will end up calling send and returning a potentially
- cached response
- """
- if not from_cache and request.method == 'GET':
- # Check for any heuristics that might update headers
- # before trying to cache.
- if self.heuristic:
- response = self.heuristic.apply(response)
-
- # apply any expiration heuristics
- if response.status == 304:
- # We must have sent an ETag request. This could mean
- # that we've been expired already or that we simply
- # have an etag. In either case, we want to try and
- # update the cache.
- cached_response = self.controller.update_cached_response(
- request, response
- )
-
- if cached_response is not response:
- from_cache = True
-
- # We are done with the server response, read a
- # possible response body (compliant servers will
- # not return one, but we cannot be 100% sure) and
- # release the connection back to the pool.
- response.read(decode_content=False)
- response.release_conn()
-
- response = cached_response
-
- # We always cache the 301 responses
- elif response.status == 301:
- self.controller.cache_response(request, response)
- else:
- # Wrap the response file with a wrapper that will cache the
- # response when the stream has been consumed.
- response._fp = CallbackFileWrapper(
- response._fp,
- functools.partial(
- self.controller.cache_response,
- request,
- response,
- )
- )
- if response.chunked:
- super_update_chunk_length = response._update_chunk_length
-
- def _update_chunk_length(self):
- super_update_chunk_length()
- if self.chunk_left == 0:
- self._fp._close()
- response._update_chunk_length = types.MethodType(_update_chunk_length, response)
-
- resp = super(CacheControlAdapter, self).build_response(
- request, response
- )
-
- # See if we should invalidate the cache.
- if request.method in self.invalidating_methods and resp.ok:
- cache_url = self.controller.cache_url(request.url)
- self.cache.delete(cache_url)
-
- # Give the response a from_cache attr to let people use it
- resp.from_cache = from_cache
-
- return resp
-
- def close(self):
- self.cache.close()
- super(CacheControlAdapter, self).close()
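
A minimal sketch of mounting the adapter by hand instead of going through the CacheControl() wrapper; the arguments shown are the defaults, and the URL is illustrative:

    from pip._vendor import requests
    from pip._vendor.cachecontrol.adapter import CacheControlAdapter
    from pip._vendor.cachecontrol.cache import DictCache

    adapter = CacheControlAdapter(cache=DictCache(), cache_etags=True)
    sess = requests.Session()
    sess.mount("https://", adapter)

    resp = sess.get("https://example.com/")
    print(resp.from_cache)  # False on a cold cache; build_response sets this attr
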
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py
deleted file mode 100644
index 7389a73..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/cache.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-The cache object API for implementing caches. The default is a thread
-safe in-memory dictionary.
-"""
-from threading import Lock
-
-
-class BaseCache(object):
-
- def get(self, key):
- raise NotImplementedError()
-
- def set(self, key, value):
- raise NotImplementedError()
-
- def delete(self, key):
- raise NotImplementedError()
-
- def close(self):
- pass
-
-
-class DictCache(BaseCache):
-
- def __init__(self, init_dict=None):
- self.lock = Lock()
- self.data = init_dict or {}
-
- def get(self, key):
- return self.data.get(key, None)
-
- def set(self, key, value):
- with self.lock:
- self.data.update({key: value})
-
- def delete(self, key):
- with self.lock:
- if key in self.data:
- self.data.pop(key)
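
The BaseCache contract is small: get/set/delete plus an optional close(). A quick sketch of the default in-memory backend (key and value are illustrative):

    from pip._vendor.cachecontrol.cache import DictCache

    cache = DictCache()
    cache.set("key", b"value")
    assert cache.get("key") == b"value"
    cache.delete("key")
    assert cache.get("key") is None
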
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
deleted file mode 100644
index f9e66a1..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from textwrap import dedent
-
-try:
- from .file_cache import FileCache
-except ImportError:
- notice = dedent('''
- NOTE: In order to use the FileCache you must have
- lockfile installed. You can install it via pip:
- pip install lockfile
- ''')
- print(notice)
-
-
-try:
- import redis
- from .redis_cache import RedisCache
-except ImportError:
- pass
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
deleted file mode 100644
index b77728f..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import hashlib
-import os
-
-from pip._vendor.lockfile import LockFile
-from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
-
-from ..cache import BaseCache
-from ..controller import CacheController
-
-
-def _secure_open_write(filename, fmode):
- # We only want to write to this file, so open it in write only mode
- flags = os.O_WRONLY
-
- # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we
- # will only open *new* files.
- # We specify this because we want to ensure that the mode we pass is the
- # mode of the file.
- flags |= os.O_CREAT | os.O_EXCL
-
- # Do not follow symlinks to prevent someone from making a symlink that
- # we follow and insecurely open a cache file.
- if hasattr(os, "O_NOFOLLOW"):
- flags |= os.O_NOFOLLOW
-
- # On Windows we'll mark this file as binary
- if hasattr(os, "O_BINARY"):
- flags |= os.O_BINARY
-
- # Before we open our file, we want to delete any existing file that is
- # there
- try:
- os.remove(filename)
- except (IOError, OSError):
- # The file must not exist already, so we can just skip ahead to opening
- pass
-
- # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
- # race condition happens between the os.remove and this line, that an
- # error will be raised. Because we utilize a lockfile this should only
- # happen if someone is attempting to attack us.
- fd = os.open(filename, flags, fmode)
- try:
- return os.fdopen(fd, "wb")
- except:
- # An error occurred wrapping our FD in a file object
- os.close(fd)
- raise
-
-
-class FileCache(BaseCache):
- def __init__(self, directory, forever=False, filemode=0o0600,
- dirmode=0o0700, use_dir_lock=None, lock_class=None):
-
- if use_dir_lock is not None and lock_class is not None:
- raise ValueError("Cannot use use_dir_lock and lock_class together")
-
- if use_dir_lock:
- lock_class = MkdirLockFile
-
- if lock_class is None:
- lock_class = LockFile
-
- self.directory = directory
- self.forever = forever
- self.filemode = filemode
- self.dirmode = dirmode
- self.lock_class = lock_class
-
- @staticmethod
- def encode(x):
- return hashlib.sha224(x.encode()).hexdigest()
-
- def _fn(self, name):
- # NOTE: This method should not change as some may depend on it.
- # See: https://github.com/ionrock/cachecontrol/issues/63
- hashed = self.encode(name)
- parts = list(hashed[:5]) + [hashed]
- return os.path.join(self.directory, *parts)
-
- def get(self, key):
- name = self._fn(key)
- if not os.path.exists(name):
- return None
-
- with open(name, 'rb') as fh:
- return fh.read()
-
- def set(self, key, value):
- name = self._fn(key)
-
- # Make sure the directory exists
- try:
- os.makedirs(os.path.dirname(name), self.dirmode)
- except (IOError, OSError):
- pass
-
- with self.lock_class(name) as lock:
- # Write our actual file
- with _secure_open_write(lock.path, self.filemode) as fh:
- fh.write(value)
-
- def delete(self, key):
- name = self._fn(key)
- if not self.forever:
- os.remove(name)
-
-
-def url_to_file_path(url, filecache):
- """Return the file cache path based on the URL.
-
- This does not ensure the file exists!
- """
- key = CacheController.cache_url(url)
- return filecache._fn(key)
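
A sketch of plugging FileCache into the wrapper; the cache directory name is illustrative, and lockfile must be importable (see caches/__init__.py above):

    from pip._vendor import requests
    from pip._vendor.cachecontrol import CacheControl
    from pip._vendor.cachecontrol.caches.file_cache import FileCache

    # Each response is stored under a nested path derived from the
    # sha224 of its cache key (see FileCache._fn above).
    sess = CacheControl(requests.Session(), cache=FileCache(".web_cache"))
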
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
deleted file mode 100644
index 9f5d55f..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from __future__ import division
-
-from datetime import datetime
-
-
-def total_seconds(td):
- """Python 2.6 compatability"""
- if hasattr(td, 'total_seconds'):
- return td.total_seconds()
-
- ms = td.microseconds
- secs = (td.seconds + td.days * 24 * 3600)
- return (ms + secs * 10**6) / 10**6
-
-
-class RedisCache(object):
-
- def __init__(self, conn):
- self.conn = conn
-
- def get(self, key):
- return self.conn.get(key)
-
- def set(self, key, value, expires=None):
- if not expires:
- self.conn.set(key, value)
- else:
- expires = expires - datetime.now()
- self.conn.setex(key, total_seconds(expires), value)
-
- def delete(self, key):
- self.conn.delete(key)
-
- def clear(self):
- """Helper for clearing all the keys in a database. Use with
- caution!"""
- for key in self.conn.keys():
- self.conn.delete(key)
-
- def close(self):
- self.conn.disconnect()
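
A sketch of the Redis backend in use; it requires a reachable Redis server, and the connection details are illustrative:

    from datetime import datetime, timedelta

    import redis
    from pip._vendor.cachecontrol.caches.redis_cache import RedisCache

    cache = RedisCache(redis.StrictRedis(host="localhost", port=6379))
    # With expires set, the value is stored via SETEX and lives for the delta.
    cache.set("key", b"value", expires=datetime.now() + timedelta(hours=1))
    assert cache.get("key") == b"value"
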
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py
deleted file mode 100644
index 018e6ac..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/compat.py
+++ /dev/null
@@ -1,20 +0,0 @@
-try:
- from urllib.parse import urljoin
-except ImportError:
- from urlparse import urljoin
-
-
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
-
-
-from pip._vendor.requests.packages.urllib3.response import HTTPResponse
-from pip._vendor.requests.packages.urllib3.util import is_fp_closed
-
-# Replicate some six behaviour
-try:
- text_type = (unicode,)
-except NameError:
- text_type = (str,)
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
deleted file mode 100644
index 5eb961f..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
+++ /dev/null
@@ -1,353 +0,0 @@
-"""
-The httplib2 algorithms ported for use with requests.
-"""
-import logging
-import re
-import calendar
-import time
-from email.utils import parsedate_tz
-
-from pip._vendor.requests.structures import CaseInsensitiveDict
-
-from .cache import DictCache
-from .serialize import Serializer
-
-
-logger = logging.getLogger(__name__)
-
-URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
-
-
-def parse_uri(uri):
- """Parses a URI using the regex given in Appendix B of RFC 3986.
-
- (scheme, authority, path, query, fragment) = parse_uri(uri)
- """
- groups = URI.match(uri).groups()
- return (groups[1], groups[3], groups[4], groups[6], groups[8])
-
-
-class CacheController(object):
- """An interface to see if request should cached or not.
- """
- def __init__(self, cache=None, cache_etags=True, serializer=None):
- self.cache = cache or DictCache()
- self.cache_etags = cache_etags
- self.serializer = serializer or Serializer()
-
- @classmethod
- def _urlnorm(cls, uri):
- """Normalize the URL to create a safe key for the cache"""
- (scheme, authority, path, query, fragment) = parse_uri(uri)
- if not scheme or not authority:
- raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
-
- scheme = scheme.lower()
- authority = authority.lower()
-
- if not path:
- path = "/"
-
- # Could do syntax based normalization of the URI before
- # computing the digest. See Section 6.2.2 of Std 66.
- request_uri = query and "?".join([path, query]) or path
- defrag_uri = scheme + "://" + authority + request_uri
-
- return defrag_uri
-
- @classmethod
- def cache_url(cls, uri):
- return cls._urlnorm(uri)
-
- def parse_cache_control(self, headers):
- """
- Parse the cache control headers returning a dictionary with values
- for the different directives.
- """
- retval = {}
-
- cc_header = 'cache-control'
- if 'Cache-Control' in headers:
- cc_header = 'Cache-Control'
-
- if cc_header in headers:
- parts = headers[cc_header].split(',')
- parts_with_args = [
- tuple([x.strip().lower() for x in part.split("=", 1)])
- for part in parts if -1 != part.find("=")
- ]
- parts_wo_args = [
- (name.strip().lower(), 1)
- for name in parts if -1 == name.find("=")
- ]
- retval = dict(parts_with_args + parts_wo_args)
- return retval
-
- def cached_request(self, request):
- """
- Return a cached response if it exists in the cache, otherwise
- return False.
- """
- cache_url = self.cache_url(request.url)
- logger.debug('Looking up "%s" in the cache', cache_url)
- cc = self.parse_cache_control(request.headers)
-
- # Bail out if the request insists on fresh data
- if 'no-cache' in cc:
- logger.debug('Request header has "no-cache", cache bypassed')
- return False
-
- # Note: parse_cache_control stores directive values as strings,
- # so a "max-age=0" request directive parses to the string '0'.
- if 'max-age' in cc and cc['max-age'] == '0':
- logger.debug('Request header has "max-age" as 0, cache bypassed')
- return False
-
- # Request allows serving from the cache, let's see if we find something
- cache_data = self.cache.get(cache_url)
- if cache_data is None:
- logger.debug('No cache entry available')
- return False
-
- # Check whether it can be deserialized
- resp = self.serializer.loads(request, cache_data)
- if not resp:
- logger.warning('Cache entry deserialization failed, entry ignored')
- return False
-
- # If we have a cached 301, return it immediately. We don't
- # need to test our response for other headers b/c it is
- # intrinsically "cacheable" as it is Permanent.
- # See:
- # https://tools.ietf.org/html/rfc7231#section-6.4.2
- #
- # Client can try to refresh the value by repeating the request
- # with cache busting headers as usual (ie no-cache).
- if resp.status == 301:
- msg = ('Returning cached "301 Moved Permanently" response '
- '(ignoring date and etag information)')
- logger.debug(msg)
- return resp
-
- headers = CaseInsensitiveDict(resp.headers)
- if not headers or 'date' not in headers:
- if 'etag' not in headers:
- # Without date or etag, the cached response can never be used
- # and should be deleted.
- logger.debug('Purging cached response: no date or etag')
- self.cache.delete(cache_url)
- logger.debug('Ignoring cached response: no date')
- return False
-
- now = time.time()
- date = calendar.timegm(
- parsedate_tz(headers['date'])
- )
- current_age = max(0, now - date)
- logger.debug('Current age based on date: %i', current_age)
-
- # TODO: There is an assumption that the result will be a
- # urllib3 response object. This may not be best since we
- # could probably avoid instantiating or constructing the
- # response until we know we need it.
- resp_cc = self.parse_cache_control(headers)
-
- # determine freshness
- freshness_lifetime = 0
-
- # Check the max-age pragma in the cache control header
- if 'max-age' in resp_cc and resp_cc['max-age'].isdigit():
- freshness_lifetime = int(resp_cc['max-age'])
- logger.debug('Freshness lifetime from max-age: %i',
- freshness_lifetime)
-
- # If there isn't a max-age, check for an expires header
- elif 'expires' in headers:
- expires = parsedate_tz(headers['expires'])
- if expires is not None:
- expire_time = calendar.timegm(expires) - date
- freshness_lifetime = max(0, expire_time)
- logger.debug("Freshness lifetime from expires: %i",
- freshness_lifetime)
-
- # Determine if we are setting freshness limit in the
- # request. Note, this overrides what was in the response.
- if 'max-age' in cc:
- try:
- freshness_lifetime = int(cc['max-age'])
- logger.debug('Freshness lifetime from request max-age: %i',
- freshness_lifetime)
- except ValueError:
- freshness_lifetime = 0
-
- if 'min-fresh' in cc:
- try:
- min_fresh = int(cc['min-fresh'])
- except ValueError:
- min_fresh = 0
- # adjust our current age by our min fresh
- current_age += min_fresh
- logger.debug('Adjusted current age from min-fresh: %i',
- current_age)
-
- # Return entry if it is fresh enough
- if freshness_lifetime > current_age:
- logger.debug('The response is "fresh", returning cached response')
- logger.debug('%i > %i', freshness_lifetime, current_age)
- return resp
-
- # we're not fresh. If we don't have an Etag, clear it out
- if 'etag' not in headers:
- logger.debug(
- 'The cached response is "stale" with no etag, purging'
- )
- self.cache.delete(cache_url)
-
- # return the original handler
- return False
-
- def conditional_headers(self, request):
- cache_url = self.cache_url(request.url)
- resp = self.serializer.loads(request, self.cache.get(cache_url))
- new_headers = {}
-
- if resp:
- headers = CaseInsensitiveDict(resp.headers)
-
- if 'etag' in headers:
- new_headers['If-None-Match'] = headers['ETag']
-
- if 'last-modified' in headers:
- new_headers['If-Modified-Since'] = headers['Last-Modified']
-
- return new_headers
-
- def cache_response(self, request, response, body=None):
- """
- Algorithm for caching requests.
-
- This assumes a requests Response object.
- """
- # From httplib2: Don't cache 206's since we aren't going to
- # handle byte range requests
- cacheable_status_codes = [200, 203, 300, 301]
- if response.status not in cacheable_status_codes:
- logger.debug(
- 'Status code %s not in %s',
- response.status,
- cacheable_status_codes
- )
- return
-
- response_headers = CaseInsensitiveDict(response.headers)
-
- # If we've been given a body, and our response has a Content-Length
- # header whose value is valid, then we can check whether the body
- # we've been given matches the expected size; if it doesn't, we'll
- # just skip trying to cache it.
- if (body is not None and
- "content-length" in response_headers and
- response_headers["content-length"].isdigit() and
- int(response_headers["content-length"]) != len(body)):
- return
-
- cc_req = self.parse_cache_control(request.headers)
- cc = self.parse_cache_control(response_headers)
-
- cache_url = self.cache_url(request.url)
- logger.debug('Updating cache with response from "%s"', cache_url)
-
- # Delete it from the cache if we happen to have it stored there
- no_store = False
- if cc.get('no-store'):
- no_store = True
- logger.debug('Response header has "no-store"')
- if cc_req.get('no-store'):
- no_store = True
- logger.debug('Request header has "no-store"')
- if no_store and self.cache.get(cache_url):
- logger.debug('Purging existing cache entry to honor "no-store"')
- self.cache.delete(cache_url)
-
- # If we've been given an etag, then keep the response
- if self.cache_etags and 'etag' in response_headers:
- logger.debug('Caching due to etag')
- self.cache.set(
- cache_url,
- self.serializer.dumps(request, response, body=body),
- )
-
- # Add to the cache any 301s. We do this before looking at
- # the Date headers.
- elif response.status == 301:
- logger.debug('Caching permanent redirect')
- self.cache.set(
- cache_url,
- self.serializer.dumps(request, response)
- )
-
- # Add to the cache if the response headers demand it. If there
- # is no date header then we can't do anything about expiring
- # the cache.
- elif 'date' in response_headers:
- # cache when there is a max-age > 0
- if cc and cc.get('max-age'):
- if cc['max-age'].isdigit() and int(cc['max-age']) > 0:
- logger.debug('Caching b/c date exists and max-age > 0')
- self.cache.set(
- cache_url,
- self.serializer.dumps(request, response, body=body),
- )
-
- # If the request can expire, it means we should cache it
- # in the meantime.
- elif 'expires' in response_headers:
- if response_headers['expires']:
- logger.debug('Caching b/c of expires header')
- self.cache.set(
- cache_url,
- self.serializer.dumps(request, response, body=body),
- )
-
- def update_cached_response(self, request, response):
- """On a 304 we will get a new set of headers that we want to
- update our cached value with, assuming we have one.
-
- This should only ever be called when we've sent an ETag and
- gotten a 304 as the response.
- """
- cache_url = self.cache_url(request.url)
-
- cached_response = self.serializer.loads(
- request,
- self.cache.get(cache_url)
- )
-
- if not cached_response:
- # we didn't have a cached response
- return response
-
- # Let's update our headers with the headers from the new response:
- # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
- #
- # The server isn't supposed to send headers that would make
- # the cached body invalid. But... just in case, we'll be sure
- # to strip out ones we know might be problematic due to
- # typical assumptions.
- excluded_headers = [
- "content-length",
- ]
-
- cached_response.headers.update(
- dict((k, v) for k, v in response.headers.items()
- if k.lower() not in excluded_headers)
- )
-
- # we want a 200 b/c we have content via the cache
- cached_response.status = 200
-
- # update our cache
- self.cache.set(
- cache_url,
- self.serializer.dumps(request, cached_response),
- )
-
- return cached_response
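
Two of the controller's helpers are usable standalone and show the key and directive handling; note that values parsed from name=value directives stay strings:

    from pip._vendor.cachecontrol.controller import CacheController

    # _urlnorm lowercases scheme and authority and drops the fragment.
    print(CacheController.cache_url("HTTP://Example.COM/path?q=1#frag"))
    # -> http://example.com/path?q=1

    cc = CacheController().parse_cache_control(
        {"Cache-Control": "max-age=3600, no-store"})
    print(cc)  # -> {'max-age': '3600', 'no-store': 1}
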
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py
deleted file mode 100644
index f1e1ce0..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from io import BytesIO
-
-
-class CallbackFileWrapper(object):
- """
- Small wrapper around a fp object which will tee everything read into a
- buffer, and when that file is closed it will execute a callback with the
- contents of that buffer.
-
- All attributes are proxied to the underlying file object.
-
- This class uses members with a double-underscore (__) prefix so as
- not to accidentally shadow an attribute of the wrapped object.
- """
-
- def __init__(self, fp, callback):
- self.__buf = BytesIO()
- self.__fp = fp
- self.__callback = callback
-
- def __getattr__(self, name):
- # The vagaries of garbage collection mean that self.__fp is
- # not always set. Using __getattribute__ with the mangled
- # private name [0] lets us look up the attribute value and
- # raise an AttributeError when it doesn't exist. This stops
- # things from infinitely recursing into getattr in the case
- # where self.__fp hasn't been set.
- #
- # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
- fp = self.__getattribute__('_CallbackFileWrapper__fp')
- return getattr(fp, name)
-
- def __is_fp_closed(self):
- try:
- return self.__fp.fp is None
- except AttributeError:
- pass
-
- try:
- return self.__fp.closed
- except AttributeError:
- pass
-
- # We just don't cache it then.
- # TODO: Add some logging here...
- return False
-
- def _close(self):
- if self.__callback:
- self.__callback(self.__buf.getvalue())
-
- # We assign this to None here, because otherwise we can get into
- # really tricky problems where the CPython interpreter deadlocks
- # because the callback is holding a reference to something which
- # has a __del__ method. Setting this to None breaks the cycle
- # and allows the garbage collector to do its thing normally.
- self.__callback = None
-
- def read(self, amt=None):
- data = self.__fp.read(amt)
- self.__buf.write(data)
- if self.__is_fp_closed():
- self._close()
-
- return data
-
- def _safe_read(self, amt):
- data = self.__fp._safe_read(amt)
- if amt == 2 and data == b'\r\n':
- # urllib executes this read to toss the CRLF at the end
- # of the chunk.
- return data
-
- self.__buf.write(data)
- if self.__is_fp_closed():
- self._close()
-
- return data
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
deleted file mode 100644
index 94715a4..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import calendar
-import time
-
-from email.utils import formatdate, parsedate, parsedate_tz
-
-from datetime import datetime, timedelta
-
-TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
-
-
-def expire_after(delta, date=None):
- date = date or datetime.now()
- return date + delta
-
-
-def datetime_to_header(dt):
- return formatdate(calendar.timegm(dt.timetuple()))
-
-
-class BaseHeuristic(object):
-
- def warning(self, response):
- """
- Return a valid 1xx warning header value describing the cache
- adjustments.
-
- The response is provided to allow warnings like 113
- http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
- to explicitly say the response is over 24 hours old.
- """
- return '110 - "Response is Stale"'
-
- def update_headers(self, response):
- """Update the response headers with any new headers.
-
- NOTE: This SHOULD always include some Warning header to
- signify that the response was cached by the client, not
- by way of the provided headers.
- """
- return {}
-
- def apply(self, response):
- updated_headers = self.update_headers(response)
-
- if updated_headers:
- response.headers.update(updated_headers)
- warning_header_value = self.warning(response)
- if warning_header_value is not None:
- response.headers.update({'Warning': warning_header_value})
-
- return response
-
-
-class OneDayCache(BaseHeuristic):
- """
- Cache the response by providing an Expires header 1 day in the
- future.
- """
- def update_headers(self, response):
- headers = {}
-
- if 'expires' not in response.headers:
- date = parsedate(response.headers['date'])
- expires = expire_after(timedelta(days=1),
- date=datetime(*date[:6]))
- headers['expires'] = datetime_to_header(expires)
- headers['cache-control'] = 'public'
- return headers
-
-
-class ExpiresAfter(BaseHeuristic):
- """
- Cache **all** requests for a defined time period.
- """
-
- def __init__(self, **kw):
- self.delta = timedelta(**kw)
-
- def update_headers(self, response):
- expires = expire_after(self.delta)
- return {
- 'expires': datetime_to_header(expires),
- 'cache-control': 'public',
- }
-
- def warning(self, response):
- tmpl = '110 - Automatically cached for %s. Response might be stale'
- return tmpl % self.delta
-
-
-class LastModified(BaseHeuristic):
- """
- If there is no Expires header already, fall back on Last-Modified
- using the heuristic from
- http://tools.ietf.org/html/rfc7234#section-4.2.2
- to calculate a reasonable value.
-
- Firefox also does something like this per
- https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
- http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
- Unlike Mozilla, we limit this to 24 hours.
- """
- cacheable_by_default_statuses = set([
- 200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
- ])
-
- def update_headers(self, resp):
- headers = resp.headers
-
- if 'expires' in headers:
- return {}
-
- if 'cache-control' in headers and headers['cache-control'] != 'public':
- return {}
-
- if resp.status not in self.cacheable_by_default_statuses:
- return {}
-
- if 'date' not in headers or 'last-modified' not in headers:
- return {}
-
- date = calendar.timegm(parsedate_tz(headers['date']))
- last_modified = parsedate(headers['last-modified'])
- if date is None or last_modified is None:
- return {}
-
- now = time.time()
- current_age = max(0, now - date)
- delta = date - calendar.timegm(last_modified)
- freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
- if freshness_lifetime <= current_age:
- return {}
-
- expires = date + freshness_lifetime
- return {'expires': time.strftime(TIME_FMT, time.gmtime(expires))}
-
- def warning(self, resp):
- return None
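
A sketch of attaching a heuristic; ExpiresAfter accepts any timedelta keyword arguments, and requests comes from pip's vendored tree:

    from pip._vendor import requests
    from pip._vendor.cachecontrol import CacheControl
    from pip._vendor.cachecontrol.heuristics import ExpiresAfter

    # Treat every response as cacheable for one day, regardless of its headers.
    sess = CacheControl(requests.Session(), heuristic=ExpiresAfter(days=1))
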
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
deleted file mode 100644
index 8f9c589..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import base64
-import io
-import json
-import zlib
-
-from pip._vendor.requests.structures import CaseInsensitiveDict
-
-from .compat import HTTPResponse, pickle, text_type
-
-
-def _b64_encode_bytes(b):
- return base64.b64encode(b).decode("ascii")
-
-
-def _b64_encode_str(s):
- return _b64_encode_bytes(s.encode("utf8"))
-
-
-def _b64_encode(s):
- if isinstance(s, text_type):
- return _b64_encode_str(s)
- return _b64_encode_bytes(s)
-
-
-def _b64_decode_bytes(b):
- return base64.b64decode(b.encode("ascii"))
-
-
-def _b64_decode_str(s):
- return _b64_decode_bytes(s).decode("utf8")
-
-
-class Serializer(object):
-
- def dumps(self, request, response, body=None):
- response_headers = CaseInsensitiveDict(response.headers)
-
- if body is None:
- body = response.read(decode_content=False)
-
- # NOTE: 99% sure this is dead code. I'm only leaving it
- # here b/c I don't have a test yet to prove
- # it. Basically, before using
- # `cachecontrol.filewrapper.CallbackFileWrapper`,
- # this made an effort to reset the file handle. The
- # `CallbackFileWrapper` short circuits this code by
- # setting the body as the content is consumed, the
- # result being that a `body` argument is *always*
- # passed into cache_response, and in turn into
- # `Serializer.dumps`.
- response._fp = io.BytesIO(body)
-
- data = {
- "response": {
- "body": _b64_encode_bytes(body),
- "headers": dict(
- (_b64_encode(k), _b64_encode(v))
- for k, v in response.headers.items()
- ),
- "status": response.status,
- "version": response.version,
- "reason": _b64_encode_str(response.reason),
- "strict": response.strict,
- "decode_content": response.decode_content,
- },
- }
-
- # Construct our vary headers
- data["vary"] = {}
- if "vary" in response_headers:
- varied_headers = response_headers['vary'].split(',')
- for header in varied_headers:
- header = header.strip()
- data["vary"][header] = request.headers.get(header, None)
-
- # Encode our Vary headers to ensure they can be serialized as JSON
- data["vary"] = dict(
- (_b64_encode(k), _b64_encode(v) if v is not None else v)
- for k, v in data["vary"].items()
- )
-
- return b",".join([
- b"cc=2",
- zlib.compress(
- json.dumps(
- data, separators=(",", ":"), sort_keys=True,
- ).encode("utf8"),
- ),
- ])
-
- def loads(self, request, data):
- # Short circuit if we've been given an empty set of data
- if not data:
- return
-
- # Determine what version of the serializer the data was serialized
- # with
- try:
- ver, data = data.split(b",", 1)
- except ValueError:
- ver = b"cc=0"
-
- # Make sure that our "ver" is actually a version and isn't a false
- # positive from a , being in the data stream.
- if ver[:3] != b"cc=":
- data = ver + data
- ver = b"cc=0"
-
- # Get the version number out of the cc=N
- ver = ver.split(b"=", 1)[-1].decode("ascii")
-
- # Dispatch to the actual load method for the given version
- try:
- return getattr(self, "_loads_v{0}".format(ver))(request, data)
- except AttributeError:
- # This is a version we don't have a loads function for, so we'll
- # just treat it as a miss and return None
- return
-
- def prepare_response(self, request, cached):
- """Verify our vary headers match and construct a real urllib3
- HTTPResponse object.
- """
- # Special case the '*' Vary value as it means we cannot actually
- # determine if the cached response is suitable for this request.
- if "*" in cached.get("vary", {}):
- return
-
- # Ensure that the Vary headers for the cached response match our
- # request
- for header, value in cached.get("vary", {}).items():
- if request.headers.get(header, None) != value:
- return
-
- body_raw = cached["response"].pop("body")
-
- headers = CaseInsensitiveDict(data=cached['response']['headers'])
- if headers.get('transfer-encoding', '') == 'chunked':
- headers.pop('transfer-encoding')
-
- cached['response']['headers'] = headers
-
- try:
- body = io.BytesIO(body_raw)
- except TypeError:
- # This can happen if cachecontrol serialized to v1 format (pickle)
- # using Python 2. A Python 2 str(byte string) will be unpickled as
- # a Python 3 str (unicode string), which will cause the above to
- # fail with:
- #
- # TypeError: 'str' does not support the buffer interface
- body = io.BytesIO(body_raw.encode('utf8'))
-
- return HTTPResponse(
- body=body,
- preload_content=False,
- **cached["response"]
- )
-
- def _loads_v0(self, request, data):
- # The original legacy cache data. This doesn't contain enough
- # information to construct everything we need, so we'll treat this as
- # a miss.
- return
-
- def _loads_v1(self, request, data):
- try:
- cached = pickle.loads(data)
- except ValueError:
- return
-
- return self.prepare_response(request, cached)
-
- def _loads_v2(self, request, data):
- try:
- cached = json.loads(zlib.decompress(data).decode("utf8"))
- except ValueError:
- return
-
- # We need to decode the items that we've base64 encoded
- cached["response"]["body"] = _b64_decode_bytes(
- cached["response"]["body"]
- )
- cached["response"]["headers"] = dict(
- (_b64_decode_str(k), _b64_decode_str(v))
- for k, v in cached["response"]["headers"].items()
- )
- cached["response"]["reason"] = _b64_decode_str(
- cached["response"]["reason"],
- )
- cached["vary"] = dict(
- (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
- for k, v in cached["vary"].items()
- )
-
- return self.prepare_response(request, cached)
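
The wire format is a b"cc=N" version prefix, a comma, then the payload; unrecognized versions fall through to a cache miss, which the dispatch in loads() makes easy to observe:

    from pip._vendor.cachecontrol.serialize import Serializer

    s = Serializer()
    assert s.loads(None, b"") is None              # empty data short-circuits
    assert s.loads(None, b"cc=9,payload") is None  # no _loads_v9 -> treated as a miss
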
diff --git a/env/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py b/env/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py
deleted file mode 100644
index ea421aa..0000000
--- a/env/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from .adapter import CacheControlAdapter
-from .cache import DictCache
-
-
-def CacheControl(sess,
- cache=None,
- cache_etags=True,
- serializer=None,
- heuristic=None):
-
- cache = cache or DictCache()
- adapter = CacheControlAdapter(
- cache,
- cache_etags=cache_etags,
- serializer=serializer,
- heuristic=heuristic,
- )
- sess.mount('http://', adapter)
- sess.mount('https://', adapter)
-
- return sess
diff --git a/env/Lib/site-packages/pip/_vendor/colorama/__init__.py b/env/Lib/site-packages/pip/_vendor/colorama/__init__.py
deleted file mode 100644
index 670e6b3..0000000
--- a/env/Lib/site-packages/pip/_vendor/colorama/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-from .initialise import init, deinit, reinit, colorama_text
-from .ansi import Fore, Back, Style, Cursor
-from .ansitowin32 import AnsiToWin32
-
-__version__ = '0.3.7'
-
diff --git a/env/Lib/site-packages/pip/_vendor/colorama/ansi.py b/env/Lib/site-packages/pip/_vendor/colorama/ansi.py
deleted file mode 100644
index 7877658..0000000
--- a/env/Lib/site-packages/pip/_vendor/colorama/ansi.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-'''
-This module generates ANSI character codes for printing colors to terminals.
-See: http://en.wikipedia.org/wiki/ANSI_escape_code
-'''
-
-CSI = '\033['
-OSC = '\033]'
-BEL = '\007'
-
-
-def code_to_chars(code):
- return CSI + str(code) + 'm'
-
-def set_title(title):
- return OSC + '2;' + title + BEL
-
-def clear_screen(mode=2):
- return CSI + str(mode) + 'J'
-
-def clear_line(mode=2):
- return CSI + str(mode) + 'K'
-
-
-class AnsiCodes(object):
- def __init__(self):
- # the subclasses declare class attributes which are numbers.
- # Upon instantiation we define instance attributes, which are the same
- # as the class attributes but wrapped with the ANSI escape sequence
- for name in dir(self):
- if not name.startswith('_'):
- value = getattr(self, name)
- setattr(self, name, code_to_chars(value))
-
-
-class AnsiCursor(object):
- def UP(self, n=1):
- return CSI + str(n) + 'A'
- def DOWN(self, n=1):
- return CSI + str(n) + 'B'
- def FORWARD(self, n=1):
- return CSI + str(n) + 'C'
- def BACK(self, n=1):
- return CSI + str(n) + 'D'
- def POS(self, x=1, y=1):
- return CSI + str(y) + ';' + str(x) + 'H'
-
-
-class AnsiFore(AnsiCodes):
- BLACK = 30
- RED = 31
- GREEN = 32
- YELLOW = 33
- BLUE = 34
- MAGENTA = 35
- CYAN = 36
- WHITE = 37
- RESET = 39
-
- # These are fairly well supported, but not part of the standard.
- LIGHTBLACK_EX = 90
- LIGHTRED_EX = 91
- LIGHTGREEN_EX = 92
- LIGHTYELLOW_EX = 93
- LIGHTBLUE_EX = 94
- LIGHTMAGENTA_EX = 95
- LIGHTCYAN_EX = 96
- LIGHTWHITE_EX = 97
-
-
-class AnsiBack(AnsiCodes):
- BLACK = 40
- RED = 41
- GREEN = 42
- YELLOW = 43
- BLUE = 44
- MAGENTA = 45
- CYAN = 46
- WHITE = 47
- RESET = 49
-
- # These are fairly well supported, but not part of the standard.
- LIGHTBLACK_EX = 100
- LIGHTRED_EX = 101
- LIGHTGREEN_EX = 102
- LIGHTYELLOW_EX = 103
- LIGHTBLUE_EX = 104
- LIGHTMAGENTA_EX = 105
- LIGHTCYAN_EX = 106
- LIGHTWHITE_EX = 107
-
-
-class AnsiStyle(AnsiCodes):
- BRIGHT = 1
- DIM = 2
- NORMAL = 22
- RESET_ALL = 0
-
-Fore = AnsiFore()
-Back = AnsiBack()
-Style = AnsiStyle()
-Cursor = AnsiCursor()
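
A sketch of the constant objects defined above; each attribute is the full escape sequence, so they concatenate directly with text:

    from pip._vendor.colorama import Fore, Back, Style

    print(Fore.RED + "error" + Style.RESET_ALL)
    print(Back.GREEN + Fore.BLACK + "ok" + Style.RESET_ALL)
    print(Style.BRIGHT + "emphasis" + Style.RESET_ALL)
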
diff --git a/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py b/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py
deleted file mode 100644
index b7ff6f2..0000000
--- a/env/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-import re
-import sys
-import os
-
-from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
-from .winterm import WinTerm, WinColor, WinStyle
-from .win32 import windll, winapi_test
-
-
-winterm = None
-if windll is not None:
- winterm = WinTerm()
-
-
-def is_stream_closed(stream):
- return not hasattr(stream, 'closed') or stream.closed
-
-
-def is_a_tty(stream):
- return hasattr(stream, 'isatty') and stream.isatty()
-
-
-class StreamWrapper(object):
- '''
- Wraps a stream (such as stdout), acting as a transparent proxy for all
- attribute access apart from method 'write()', which is delegated to our
- Converter instance.
- '''
- def __init__(self, wrapped, converter):
- # double-underscore everything to prevent clashes with names of
- # attributes on the wrapped stream object.
- self.__wrapped = wrapped
- self.__convertor = converter
-
- def __getattr__(self, name):
- return getattr(self.__wrapped, name)
-
- def write(self, text):
- self.__convertor.write(text)
-
-
-class AnsiToWin32(object):
- '''
- Implements a 'write()' method which, on Windows, will strip ANSI character
- sequences from the text, and if outputting to a tty, will convert them into
- win32 function calls.
- '''
- ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
- ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command
-
- def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
- # The wrapped stream (normally sys.stdout or sys.stderr)
- self.wrapped = wrapped
-
- # should we reset colors to defaults after every .write()
- self.autoreset = autoreset
-
- # create the proxy wrapping our output stream
- self.stream = StreamWrapper(wrapped, self)
-
- on_windows = os.name == 'nt'
- # We test if the WinAPI works, because even if we are on Windows
- # we may be using a terminal that doesn't support the WinAPI
- # (e.g. Cygwin Terminal). In this case it's up to the terminal
- # to support the ANSI codes.
- conversion_supported = on_windows and winapi_test()
-
- # should we strip ANSI sequences from our output?
- if strip is None:
- strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
- self.strip = strip
-
- # should we convert ANSI sequences into win32 calls?
- if convert is None:
- convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
- self.convert = convert
-
- # dict of ansi codes to win32 functions and parameters
- self.win32_calls = self.get_win32_calls()
-
- # are we wrapping stderr?
- self.on_stderr = self.wrapped is sys.stderr
-
- def should_wrap(self):
- '''
- True if this class is actually needed. If false, then the output
- stream will not be affected, nor will win32 calls be issued, so
- wrapping stdout is not actually required. This will generally be
- False on non-Windows platforms, unless optional functionality like
- autoreset has been requested using kwargs to init()
- '''
- return self.convert or self.strip or self.autoreset
-
- def get_win32_calls(self):
- if self.convert and winterm:
- return {
- AnsiStyle.RESET_ALL: (winterm.reset_all, ),
- AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
- AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
- AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
- AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
- AnsiFore.RED: (winterm.fore, WinColor.RED),
- AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
- AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
- AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
- AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
- AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
- AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
- AnsiFore.RESET: (winterm.fore, ),
- AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
- AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
- AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
- AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
- AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
- AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
- AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
- AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
- AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
- AnsiBack.RED: (winterm.back, WinColor.RED),
- AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
- AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
- AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
- AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
- AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
- AnsiBack.WHITE: (winterm.back, WinColor.GREY),
- AnsiBack.RESET: (winterm.back, ),
- AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
- AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
- AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
- AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
- AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
- AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
- AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
- AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
- }
- return dict()
-
- def write(self, text):
- if self.strip or self.convert:
- self.write_and_convert(text)
- else:
- self.wrapped.write(text)
- self.wrapped.flush()
- if self.autoreset:
- self.reset_all()
-
-
- def reset_all(self):
- if self.convert:
- self.call_win32('m', (0,))
- elif not self.strip and not is_stream_closed(self.wrapped):
- self.wrapped.write(Style.RESET_ALL)
-
-
- def write_and_convert(self, text):
- '''
- Write the given text to our wrapped stream, stripping any ANSI
- sequences from the text, and optionally converting them into win32
- calls.
- '''
- cursor = 0
- text = self.convert_osc(text)
- for match in self.ANSI_CSI_RE.finditer(text):
- start, end = match.span()
- self.write_plain_text(text, cursor, start)
- self.convert_ansi(*match.groups())
- cursor = end
- self.write_plain_text(text, cursor, len(text))
-
-
- def write_plain_text(self, text, start, end):
- if start < end:
- self.wrapped.write(text[start:end])
- self.wrapped.flush()
-
-
- def convert_ansi(self, paramstring, command):
- if self.convert:
- params = self.extract_params(command, paramstring)
- self.call_win32(command, params)
-
-
- def extract_params(self, command, paramstring):
- if command in 'Hf':
- params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
- while len(params) < 2:
- # defaults:
- params = params + (1,)
- else:
- params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
- if len(params) == 0:
- # defaults:
- if command in 'JKm':
- params = (0,)
- elif command in 'ABCD':
- params = (1,)
-
- return params
-
-
- def call_win32(self, command, params):
- if command == 'm':
- for param in params:
- if param in self.win32_calls:
- func_args = self.win32_calls[param]
- func = func_args[0]
- args = func_args[1:]
- kwargs = dict(on_stderr=self.on_stderr)
- func(*args, **kwargs)
- elif command in 'J':
- winterm.erase_screen(params[0], on_stderr=self.on_stderr)
- elif command in 'K':
- winterm.erase_line(params[0], on_stderr=self.on_stderr)
- elif command in 'Hf': # cursor position - absolute
- winterm.set_cursor_position(params, on_stderr=self.on_stderr)
- elif command in 'ABCD': # cursor position - relative
- n = params[0]
- # A - up, B - down, C - forward, D - back
- x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
- winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
-
-
- def convert_osc(self, text):
- for match in self.ANSI_OSC_RE.finditer(text):
- start, end = match.span()
- text = text[:start] + text[end:]
- paramstring, command = match.groups()
- if command in '\x07': # \x07 = BEL
- params = paramstring.split(";")
- # 0 - change title and icon (we will only change title)
- # 1 - change icon (we don't support this)
- # 2 - change title
- if params[0] in '02':
- winterm.set_title(params[1])
- return text
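
A sketch of using the converter directly rather than via init(); should_wrap() reports whether the proxy is needed at all on the current platform:

    import sys

    from pip._vendor.colorama.ansitowin32 import AnsiToWin32

    wrapper = AnsiToWin32(sys.stdout)
    stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
    stream.write("\033[31mred where supported\033[0m\n")
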
diff --git a/env/Lib/site-packages/pip/_vendor/colorama/initialise.py b/env/Lib/site-packages/pip/_vendor/colorama/initialise.py
deleted file mode 100644
index 834962a..0000000
--- a/env/Lib/site-packages/pip/_vendor/colorama/initialise.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-import atexit
-import contextlib
-import sys
-
-from .ansitowin32 import AnsiToWin32
-
-
-orig_stdout = None
-orig_stderr = None
-
-wrapped_stdout = None
-wrapped_stderr = None
-
-atexit_done = False
-
-
-def reset_all():
- if AnsiToWin32 is not None: # Issue #74: objects might become None at exit
- AnsiToWin32(orig_stdout).reset_all()
-
-
-def init(autoreset=False, convert=None, strip=None, wrap=True):
-
- if not wrap and any([autoreset, convert, strip]):
- raise ValueError('wrap=False conflicts with any other arg=True')
-
- global wrapped_stdout, wrapped_stderr
- global orig_stdout, orig_stderr
-
- orig_stdout = sys.stdout
- orig_stderr = sys.stderr
-
- if sys.stdout is None:
- wrapped_stdout = None
- else:
- sys.stdout = wrapped_stdout = \
- wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
- if sys.stderr is None:
- wrapped_stderr = None
- else:
- sys.stderr = wrapped_stderr = \
- wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
-
- global atexit_done
- if not atexit_done:
- atexit.register(reset_all)
- atexit_done = True
-
-
-def deinit():
- if orig_stdout is not None:
- sys.stdout = orig_stdout
- if orig_stderr is not None:
- sys.stderr = orig_stderr
-
-
-@contextlib.contextmanager
-def colorama_text(*args, **kwargs):
- init(*args, **kwargs)
- try:
- yield
- finally:
- deinit()
-
-
-def reinit():
- if wrapped_stdout is not None:
- sys.stdout = wrapped_stdout
- if wrapped_stderr is not None:
- sys.stderr = wrapped_stderr
-
-
-def wrap_stream(stream, convert, strip, autoreset, wrap):
- if wrap:
- wrapper = AnsiToWin32(stream,
- convert=convert, strip=strip, autoreset=autoreset)
- if wrapper.should_wrap():
- stream = wrapper.stream
- return stream
-
-
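
A sketch of the module's entry points; colorama_text() is the context-manager form of init()/deinit(), so stream patching is undone on exit:

    from pip._vendor.colorama import Fore, colorama_text

    with colorama_text(autoreset=True):
        print(Fore.CYAN + "colored inside the block")
    print("plain again; stdout/stderr restored by deinit()")
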
diff --git a/env/Lib/site-packages/pip/_vendor/colorama/win32.py b/env/Lib/site-packages/pip/_vendor/colorama/win32.py
deleted file mode 100644
index 3d1d2f2..0000000
--- a/env/Lib/site-packages/pip/_vendor/colorama/win32.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-
-# from winbase.h
-STDOUT = -11
-STDERR = -12
-
-try:
- import ctypes
- from ctypes import LibraryLoader
- windll = LibraryLoader(ctypes.WinDLL)
- from ctypes import wintypes
-except (AttributeError, ImportError):
- windll = None
- SetConsoleTextAttribute = lambda *_: None
- winapi_test = lambda *_: None
-else:
- from ctypes import byref, Structure, c_char, POINTER
-
- COORD = wintypes._COORD
-
- class CONSOLE_SCREEN_BUFFER_INFO(Structure):
- """struct in wincon.h."""
- _fields_ = [
- ("dwSize", COORD),
- ("dwCursorPosition", COORD),
- ("wAttributes", wintypes.WORD),
- ("srWindow", wintypes.SMALL_RECT),
- ("dwMaximumWindowSize", COORD),
- ]
- def __str__(self):
- return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
- self.dwSize.Y, self.dwSize.X
- , self.dwCursorPosition.Y, self.dwCursorPosition.X
- , self.wAttributes
- , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
- , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
- )
-
- _GetStdHandle = windll.kernel32.GetStdHandle
- _GetStdHandle.argtypes = [
- wintypes.DWORD,
- ]
- _GetStdHandle.restype = wintypes.HANDLE
-
- _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
- _GetConsoleScreenBufferInfo.argtypes = [
- wintypes.HANDLE,
- POINTER(CONSOLE_SCREEN_BUFFER_INFO),
- ]
- _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
-
- _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
- _SetConsoleTextAttribute.argtypes = [
- wintypes.HANDLE,
- wintypes.WORD,
- ]
- _SetConsoleTextAttribute.restype = wintypes.BOOL
-
- _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
- _SetConsoleCursorPosition.argtypes = [
- wintypes.HANDLE,
- COORD,
- ]
- _SetConsoleCursorPosition.restype = wintypes.BOOL
-
- _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
- _FillConsoleOutputCharacterA.argtypes = [
- wintypes.HANDLE,
- c_char,
- wintypes.DWORD,
- COORD,
- POINTER(wintypes.DWORD),
- ]
- _FillConsoleOutputCharacterA.restype = wintypes.BOOL
-
- _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
- _FillConsoleOutputAttribute.argtypes = [
- wintypes.HANDLE,
- wintypes.WORD,
- wintypes.DWORD,
- COORD,
- POINTER(wintypes.DWORD),
- ]
- _FillConsoleOutputAttribute.restype = wintypes.BOOL
-
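- # Note: despite the W suffix on the Python name, this binds the ANSI
- # variant (SetConsoleTitleA), so titles are passed as bytes (LPCSTR).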
- _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
- _SetConsoleTitleW.argtypes = [
- wintypes.LPCSTR
- ]
- _SetConsoleTitleW.restype = wintypes.BOOL
-
- handles = {
- STDOUT: _GetStdHandle(STDOUT),
- STDERR: _GetStdHandle(STDERR),
- }
-
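- # GetConsoleScreenBufferInfo fails when stdout is redirected to a file or
- # pipe, so this probe distinguishes a real console from a non-console
- # stream.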
- def winapi_test():
- handle = handles[STDOUT]
- csbi = CONSOLE_SCREEN_BUFFER_INFO()
- success = _GetConsoleScreenBufferInfo(
- handle, byref(csbi))
- return bool(success)
-
- def GetConsoleScreenBufferInfo(stream_id=STDOUT):
- handle = handles[stream_id]
- csbi = CONSOLE_SCREEN_BUFFER_INFO()
- success = _GetConsoleScreenBufferInfo(
- handle, byref(csbi))
- return csbi
-
- def SetConsoleTextAttribute(stream_id, attrs):
- handle = handles[stream_id]
- return _SetConsoleTextAttribute(handle, attrs)
-
- def SetConsoleCursorPosition(stream_id, position, adjust=True):
- position = COORD(*position)
- # If the position is out of range, do nothing.
- if position.Y <= 0 or position.X <= 0:
- return
- # Adjust for Windows' SetConsoleCursorPosition:
- # 1. being 0-based, while ANSI is 1-based.
- # 2. expecting (x,y), while ANSI uses (y,x).
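- # e.g. an ANSI position of (row=5, col=10) becomes COORD(X=9, Y=4) here,
- # before the viewport scroll adjustment below.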
- adjusted_position = COORD(position.Y - 1, position.X - 1)
- if adjust:
- # Adjust for viewport's scroll position
- sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
- adjusted_position.Y += sr.Top
- adjusted_position.X += sr.Left
- # Resume normal processing
- handle = handles[stream_id]
- return _SetConsoleCursorPosition(handle, adjusted_position)
-
- def FillConsoleOutputCharacter(stream_id, char, length, start):
- handle = handles[stream_id]
- char = c_char(char.encode())
- length = wintypes.DWORD(length)
- num_written = wintypes.DWORD(0)
- # Note that this is hard-coded for ANSI (vs wide) bytes.
- success = _FillConsoleOutputCharacterA(
- handle, char, length, start, byref(num_written))
- return num_written.value
-
- def FillConsoleOutputAttribute(stream_id, attr, length, start):
- ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
- handle = handles[stream_id]
- attribute = wintypes.WORD(attr)
- length = wintypes.DWORD(length)
- num_written = wintypes.DWORD(0)
- # Note that this is hard-coded for ANSI (vs wide) bytes.
- return _FillConsoleOutputAttribute(
- handle, attribute, length, start, byref(num_written))
-
- def SetConsoleTitle(title):
- return _SetConsoleTitleW(title)
diff --git a/env/Lib/site-packages/pip/_vendor/colorama/winterm.py b/env/Lib/site-packages/pip/_vendor/colorama/winterm.py
deleted file mode 100644
index 60309d3..0000000
--- a/env/Lib/site-packages/pip/_vendor/colorama/winterm.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-from . import win32
-
-
-# from wincon.h
-class WinColor(object):
- BLACK = 0
- BLUE = 1
- GREEN = 2
- CYAN = 3
- RED = 4
- MAGENTA = 5
- YELLOW = 6
- GREY = 7
-
-# from wincon.h
-class WinStyle(object):
- NORMAL = 0x00 # dim text, dim background
- BRIGHT = 0x08 # bright text, dim background
- BRIGHT_BACKGROUND = 0x80 # dim text, bright background
-
-class WinTerm(object):
-
- def __init__(self):
- self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
- self.set_attrs(self._default)
- self._default_fore = self._fore
- self._default_back = self._back
- self._default_style = self._style
- # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
- # So that LIGHT_EX colors and BRIGHT style do not clobber each other,
- # we track them separately, since LIGHT_EX is overwritten by Fore/Back
- # and BRIGHT is overwritten by Style codes.
- self._light = 0
-
- def get_attrs(self):
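- # Pack the console attribute word: e.g. red text (4) on a grey
- # background (7) with NORMAL style gives 4 + 7 * 16 = 0x74.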
- return self._fore + self._back * 16 + (self._style | self._light)
-
- def set_attrs(self, value):
- self._fore = value & 7
- self._back = (value >> 4) & 7
- self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
-
- def reset_all(self, on_stderr=None):
- self.set_attrs(self._default)
- self.set_console(attrs=self._default)
-
- def fore(self, fore=None, light=False, on_stderr=False):
- if fore is None:
- fore = self._default_fore
- self._fore = fore
- # Emulate LIGHT_EX with BRIGHT Style
- if light:
- self._light |= WinStyle.BRIGHT
- else:
- self._light &= ~WinStyle.BRIGHT
- self.set_console(on_stderr=on_stderr)
-
- def back(self, back=None, light=False, on_stderr=False):
- if back is None:
- back = self._default_back
- self._back = back
- # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
- if light:
- self._light |= WinStyle.BRIGHT_BACKGROUND
- else:
- self._light &= ~WinStyle.BRIGHT_BACKGROUND
- self.set_console(on_stderr=on_stderr)
-
- def style(self, style=None, on_stderr=False):
- if style is None:
- style = self._default_style
- self._style = style
- self.set_console(on_stderr=on_stderr)
-
- def set_console(self, attrs=None, on_stderr=False):
- if attrs is None:
- attrs = self.get_attrs()
- handle = win32.STDOUT
- if on_stderr:
- handle = win32.STDERR
- win32.SetConsoleTextAttribute(handle, attrs)
-
- def get_position(self, handle):
- position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
- # Windows coordinates are 0-based, while win32.SetConsoleCursorPosition
- # expects 1-based (ANSI-style) coordinates, so shift both axes by one.
- position.X += 1
- position.Y += 1
- return position
-
- def set_cursor_position(self, position=None, on_stderr=False):
- if position is None:
- # I'm not currently tracking the position, so there is no default.
- # position = self.get_position()
- return
- handle = win32.STDOUT
- if on_stderr:
- handle = win32.STDERR
- win32.SetConsoleCursorPosition(handle, position)
-
- def cursor_adjust(self, x, y, on_stderr=False):
- handle = win32.STDOUT
- if on_stderr:
- handle = win32.STDERR
- position = self.get_position(handle)
- adjusted_position = (position.Y + y, position.X + x)
- win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
-
- def erase_screen(self, mode=0, on_stderr=False):
- # 0 should clear from the cursor to the end of the screen.
- # 1 should clear from the cursor to the beginning of the screen.
- # 2 should clear the entire screen, and move cursor to (1,1)
- handle = win32.STDOUT
- if on_stderr:
- handle = win32.STDERR
- csbi = win32.GetConsoleScreenBufferInfo(handle)
- # get the number of character cells in the current buffer
- cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
- # get number of character cells before current cursor position
- cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
- if mode == 0:
- from_coord = csbi.dwCursorPosition
- cells_to_erase = cells_in_screen - cells_before_cursor
- elif mode == 1:
- from_coord = win32.COORD(0, 0)
- cells_to_erase = cells_before_cursor
- elif mode == 2:
- from_coord = win32.COORD(0, 0)
- cells_to_erase = cells_in_screen
- else:
- # unsupported erase mode; return rather than hit an undefined name below
- return
- # fill the selected region with blanks
- win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
- # now set the buffer's attributes accordingly
- win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
- if mode == 2:
- # put the cursor where needed
- win32.SetConsoleCursorPosition(handle, (1, 1))
-
- def erase_line(self, mode=0, on_stderr=False):
- # 0 should clear from the cursor to the end of the line.
- # 1 should clear from the cursor to the beginning of the line.
- # 2 should clear the entire line.
- handle = win32.STDOUT
- if on_stderr:
- handle = win32.STDERR
- csbi = win32.GetConsoleScreenBufferInfo(handle)
- if mode == 0:
- from_coord = csbi.dwCursorPosition
- cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
- elif mode == 1:
- from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
- cells_to_erase = csbi.dwCursorPosition.X
- elif mode == 2:
- from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
- cells_to_erase = csbi.dwSize.X
- else:
- # unsupported erase mode; return rather than hit an undefined name below
- return
- # fill the selected cells of the line with blanks
- win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
- # now set the buffer's attributes accordingly
- win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
-
- def set_title(self, title):
- win32.SetConsoleTitle(title)
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/__init__.py b/env/Lib/site-packages/pip/_vendor/distlib/__init__.py
deleted file mode 100644
index d186b0a..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012-2016 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-import logging
-
-__version__ = '0.2.4'
-
-class DistlibException(Exception):
- pass
-
-try:
- from logging import NullHandler
-except ImportError: # pragma: no cover
- class NullHandler(logging.Handler):
- def handle(self, record): pass
- def emit(self, record): pass
- def createLock(self): self.lock = None
-
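-# Attaching a no-op handler stops "No handlers could be found" warnings when
-# the embedding application has not configured logging.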
-logger = logging.getLogger(__name__)
-logger.addHandler(NullHandler())
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py
deleted file mode 100644
index f7dbf4c..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""Modules copied from Python 3 standard libraries, for internal use only.
-
-Individual classes and functions are found in distlib._backport.misc. Intended
-usage is to always import things missing from 3.1 from that module: the
-built-in/stdlib objects will be used if found.
-"""
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py
deleted file mode 100644
index cfb318d..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012 The Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""Backports for individual classes and functions."""
-
-import os
-import sys
-
-__all__ = ['cache_from_source', 'callable', 'fsencode']
-
-
-try:
- from imp import cache_from_source
-except ImportError:
- def cache_from_source(py_file, debug=__debug__):
- ext = 'c' if debug else 'o'
- return py_file + ext
-
-
-try:
- callable = callable
-except NameError:
- from collections import Callable
-
- def callable(obj):
- return isinstance(obj, Callable)
-
-
-try:
- fsencode = os.fsencode
-except AttributeError:
- def fsencode(filename):
- if isinstance(filename, bytes):
- return filename
- elif isinstance(filename, str):
- return filename.encode(sys.getfilesystemencoding())
- else:
- raise TypeError("expect bytes or str, not %s" %
- type(filename).__name__)
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py
deleted file mode 100644
index 159e49e..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py
+++ /dev/null
@@ -1,761 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012 The Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""Utility functions for copying and archiving files and directory trees.
-
-XXX The functions here don't copy the resource fork or other metadata on Mac.
-
-"""
-
-import os
-import sys
-import stat
-from os.path import abspath
-import fnmatch
-import collections
-import errno
-from . import tarfile
-
-try:
- import bz2
- _BZ2_SUPPORTED = True
-except ImportError:
- _BZ2_SUPPORTED = False
-
-try:
- from pwd import getpwnam
-except ImportError:
- getpwnam = None
-
-try:
- from grp import getgrnam
-except ImportError:
- getgrnam = None
-
-__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
- "copytree", "move", "rmtree", "Error", "SpecialFileError",
- "ExecError", "make_archive", "get_archive_formats",
- "register_archive_format", "unregister_archive_format",
- "get_unpack_formats", "register_unpack_format",
- "unregister_unpack_format", "unpack_archive", "ignore_patterns"]
-
-class Error(EnvironmentError):
- pass
-
-class SpecialFileError(EnvironmentError):
- """Raised when trying to do a kind of operation (e.g. copying) which is
- not supported on a special file (e.g. a named pipe)"""
-
-class ExecError(EnvironmentError):
- """Raised when a command could not be executed"""
-
-class ReadError(EnvironmentError):
- """Raised when an archive cannot be read"""
-
-class RegistryError(Exception):
- """Raised when a registry operation with the archiving
- and unpacking registries fails"""
-
-
-try:
- WindowsError
-except NameError:
- WindowsError = None
-
-def copyfileobj(fsrc, fdst, length=16*1024):
- """copy data from file-like object fsrc to file-like object fdst"""
- while True:
- buf = fsrc.read(length)
- if not buf:
- break
- fdst.write(buf)
-
-def _samefile(src, dst):
- # Macintosh, Unix.
- if hasattr(os.path, 'samefile'):
- try:
- return os.path.samefile(src, dst)
- except OSError:
- return False
-
- # All other platforms: check for same pathname.
- return (os.path.normcase(os.path.abspath(src)) ==
- os.path.normcase(os.path.abspath(dst)))
-
-def copyfile(src, dst):
- """Copy data from src to dst"""
- if _samefile(src, dst):
- raise Error("`%s` and `%s` are the same file" % (src, dst))
-
- for fn in [src, dst]:
- try:
- st = os.stat(fn)
- except OSError:
- # File most likely does not exist
- pass
- else:
- # XXX What about other special files? (sockets, devices...)
- if stat.S_ISFIFO(st.st_mode):
- raise SpecialFileError("`%s` is a named pipe" % fn)
-
- with open(src, 'rb') as fsrc:
- with open(dst, 'wb') as fdst:
- copyfileobj(fsrc, fdst)
-
-def copymode(src, dst):
- """Copy mode bits from src to dst"""
- if hasattr(os, 'chmod'):
- st = os.stat(src)
- mode = stat.S_IMODE(st.st_mode)
- os.chmod(dst, mode)
-
-def copystat(src, dst):
- """Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
- st = os.stat(src)
- mode = stat.S_IMODE(st.st_mode)
- if hasattr(os, 'utime'):
- os.utime(dst, (st.st_atime, st.st_mtime))
- if hasattr(os, 'chmod'):
- os.chmod(dst, mode)
- if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
- try:
- os.chflags(dst, st.st_flags)
- except OSError as why:
- if (not hasattr(errno, 'EOPNOTSUPP') or
- why.errno != errno.EOPNOTSUPP):
- raise
-
-def copy(src, dst):
- """Copy data and mode bits ("cp src dst").
-
- The destination may be a directory.
-
- """
- if os.path.isdir(dst):
- dst = os.path.join(dst, os.path.basename(src))
- copyfile(src, dst)
- copymode(src, dst)
-
-def copy2(src, dst):
- """Copy data and all stat info ("cp -p src dst").
-
- The destination may be a directory.
-
- """
- if os.path.isdir(dst):
- dst = os.path.join(dst, os.path.basename(src))
- copyfile(src, dst)
- copystat(src, dst)
-
-def ignore_patterns(*patterns):
- """Function that can be used as copytree() ignore parameter.
-
- Patterns is a sequence of glob-style patterns
- that are used to exclude files"""
- def _ignore_patterns(path, names):
- ignored_names = []
- for pattern in patterns:
- ignored_names.extend(fnmatch.filter(names, pattern))
- return set(ignored_names)
- return _ignore_patterns
-
-def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
- ignore_dangling_symlinks=False):
- """Recursively copy a directory tree.
-
- The destination directory must not already exist.
- If exception(s) occur, an Error is raised with a list of reasons.
-
- If the optional symlinks flag is true, symbolic links in the
- source tree result in symbolic links in the destination tree; if
- it is false, the contents of the files pointed to by symbolic
- links are copied. If the file pointed by the symlink doesn't
- exist, an exception will be added in the list of errors raised in
- an Error exception at the end of the copy process.
-
- You can set the optional ignore_dangling_symlinks flag to true if you
- want to silence this exception. Notice that this has no effect on
- platforms that don't support os.symlink.
-
- The optional ignore argument is a callable. If given, it
- is called with the `src` parameter, which is the directory
- being visited by copytree(), and `names` which is the list of
- `src` contents, as returned by os.listdir():
-
- callable(src, names) -> ignored_names
-
- Since copytree() is called recursively, the callable will be
- called once for each directory that is copied. It returns a
- list of names relative to the `src` directory that should
- not be copied.
-
- The optional copy_function argument is a callable that will be used
- to copy each file. It will be called with the source path and the
- destination path as arguments. By default, copy2() is used, but any
- function that supports the same signature (like copy()) can be used.
-
- """
- names = os.listdir(src)
- if ignore is not None:
- ignored_names = ignore(src, names)
- else:
- ignored_names = set()
-
- os.makedirs(dst)
- errors = []
- for name in names:
- if name in ignored_names:
- continue
- srcname = os.path.join(src, name)
- dstname = os.path.join(dst, name)
- try:
- if os.path.islink(srcname):
- linkto = os.readlink(srcname)
- if symlinks:
- os.symlink(linkto, dstname)
- else:
- # ignore dangling symlink if the flag is on
- if not os.path.exists(linkto) and ignore_dangling_symlinks:
- continue
- # otherwise let the copy occur; copy2 will raise an error
- copy_function(srcname, dstname)
- elif os.path.isdir(srcname):
- copytree(srcname, dstname, symlinks, ignore, copy_function)
- else:
- # Will raise a SpecialFileError for unsupported file types
- copy_function(srcname, dstname)
- # catch the Error from the recursive copytree so that we can
- # continue with other files
- except Error as err:
- errors.extend(err.args[0])
- except EnvironmentError as why:
- errors.append((srcname, dstname, str(why)))
- try:
- copystat(src, dst)
- except OSError as why:
- if WindowsError is not None and isinstance(why, WindowsError):
- # Copying file access times may fail on Windows
- pass
- else:
- errors.append((src, dst, str(why)))
- if errors:
- raise Error(errors)
-
-def rmtree(path, ignore_errors=False, onerror=None):
- """Recursively delete a directory tree.
-
- If ignore_errors is set, errors are ignored; otherwise, if onerror
- is set, it is called to handle the error with arguments (func,
- path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
- path is the argument to that function that caused it to fail; and
- exc_info is a tuple returned by sys.exc_info(). If ignore_errors
- is false and onerror is None, an exception is raised.
-
- """
- if ignore_errors:
- def onerror(*args):
- pass
- elif onerror is None:
- def onerror(*args):
- raise
- try:
- if os.path.islink(path):
- # symlinks to directories are forbidden, see bug #1669
- raise OSError("Cannot call rmtree on a symbolic link")
- except OSError:
- onerror(os.path.islink, path, sys.exc_info())
- # can't continue even if onerror hook returns
- return
- names = []
- try:
- names = os.listdir(path)
- except os.error:
- onerror(os.listdir, path, sys.exc_info())
- for name in names:
- fullname = os.path.join(path, name)
- try:
- mode = os.lstat(fullname).st_mode
- except os.error:
- mode = 0
- if stat.S_ISDIR(mode):
- rmtree(fullname, ignore_errors, onerror)
- else:
- try:
- os.remove(fullname)
- except os.error:
- onerror(os.remove, fullname, sys.exc_info())
- try:
- os.rmdir(path)
- except os.error:
- onerror(os.rmdir, path, sys.exc_info())
-
-
-def _basename(path):
- # A basename() variant which first strips the trailing slash, if present.
- # Thus we always get the last component of the path, even for directories.
- return os.path.basename(path.rstrip(os.path.sep))
-
-def move(src, dst):
- """Recursively move a file or directory to another location. This is
- similar to the Unix "mv" command.
-
- If the destination is a directory or a symlink to a directory, the source
- is moved inside the directory. The destination path must not already
- exist.
-
- If the destination already exists but is not a directory, it may be
- overwritten depending on os.rename() semantics.
-
- If the destination is on our current filesystem, then rename() is used.
- Otherwise, src is copied to the destination and then removed.
- A lot more could be done here... A look at a mv.c shows a lot of
- the issues this implementation glosses over.
-
- """
- real_dst = dst
- if os.path.isdir(dst):
- if _samefile(src, dst):
- # We might be on a case insensitive filesystem,
- # perform the rename anyway.
- os.rename(src, dst)
- return
-
- real_dst = os.path.join(dst, _basename(src))
- if os.path.exists(real_dst):
- raise Error("Destination path '%s' already exists" % real_dst)
- try:
- os.rename(src, real_dst)
- except OSError:
- if os.path.isdir(src):
- if _destinsrc(src, dst):
- raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
- copytree(src, real_dst, symlinks=True)
- rmtree(src)
- else:
- copy2(src, real_dst)
- os.unlink(src)
-
-def _destinsrc(src, dst):
- src = abspath(src)
- dst = abspath(dst)
- if not src.endswith(os.path.sep):
- src += os.path.sep
- if not dst.endswith(os.path.sep):
- dst += os.path.sep
- return dst.startswith(src)
-
-def _get_gid(name):
- """Returns a gid, given a group name."""
- if getgrnam is None or name is None:
- return None
- try:
- result = getgrnam(name)
- except KeyError:
- result = None
- if result is not None:
- return result[2]
- return None
-
-def _get_uid(name):
- """Returns an uid, given a user name."""
- if getpwnam is None or name is None:
- return None
- try:
- result = getpwnam(name)
- except KeyError:
- result = None
- if result is not None:
- return result[2]
- return None
-
-def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
- owner=None, group=None, logger=None):
- """Create a (possibly compressed) tar file from all the files under
- 'base_dir'.
-
- 'compress' must be "gzip" (the default), "bzip2", or None.
-
- 'owner' and 'group' can be used to define an owner and a group for the
- archive that is being built. If not provided, the current owner and group
- will be used.
-
- The output tar file will be named 'base_name' + ".tar", possibly plus
- the appropriate compression extension (".gz", or ".bz2").
-
- Returns the output filename.
- """
- tar_compression = {'gzip': 'gz', None: ''}
- compress_ext = {'gzip': '.gz'}
-
- if _BZ2_SUPPORTED:
- tar_compression['bzip2'] = 'bz2'
- compress_ext['bzip2'] = '.bz2'
-
- # flags for compression program, each element of list will be an argument
- if compress is not None and compress not in compress_ext:
- raise ValueError("bad value for 'compress', or compression format not "
- "supported : {0}".format(compress))
-
- archive_name = base_name + '.tar' + compress_ext.get(compress, '')
- archive_dir = os.path.dirname(archive_name)
-
- if not os.path.exists(archive_dir):
- if logger is not None:
- logger.info("creating %s", archive_dir)
- if not dry_run:
- os.makedirs(archive_dir)
-
- # creating the tarball
- if logger is not None:
- logger.info('Creating tar archive')
-
- uid = _get_uid(owner)
- gid = _get_gid(group)
-
- def _set_uid_gid(tarinfo):
- if gid is not None:
- tarinfo.gid = gid
- tarinfo.gname = group
- if uid is not None:
- tarinfo.uid = uid
- tarinfo.uname = owner
- return tarinfo
-
- if not dry_run:
- tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
- try:
- tar.add(base_dir, filter=_set_uid_gid)
- finally:
- tar.close()
-
- return archive_name
-
-def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
- # XXX see if we want to keep an external call here
- if verbose:
- zipoptions = "-r"
- else:
- zipoptions = "-rq"
- from distutils.errors import DistutilsExecError
- from distutils.spawn import spawn
- try:
- spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
- except DistutilsExecError:
- # XXX really should distinguish between "couldn't find
- # external 'zip' command" and "zip failed".
- raise ExecError("unable to create zip file '%s': "
- "could neither import the 'zipfile' module nor "
- "find a standalone zip utility") % zip_filename
-
-def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
- """Create a zip file from all the files under 'base_dir'.
-
- The output zip file will be named 'base_name' + ".zip". Uses either the
- "zipfile" Python module (if available) or the InfoZIP "zip" utility
- (if installed and found on the default search path). If neither tool is
- available, raises ExecError. Returns the name of the output zip
- file.
- """
- zip_filename = base_name + ".zip"
- archive_dir = os.path.dirname(base_name)
-
- if not os.path.exists(archive_dir):
- if logger is not None:
- logger.info("creating %s", archive_dir)
- if not dry_run:
- os.makedirs(archive_dir)
-
- # If zipfile module is not available, try spawning an external 'zip'
- # command.
- try:
- import zipfile
- except ImportError:
- zipfile = None
-
- if zipfile is None:
- _call_external_zip(base_dir, zip_filename, verbose, dry_run)
- else:
- if logger is not None:
- logger.info("creating '%s' and adding '%s' to it",
- zip_filename, base_dir)
-
- if not dry_run:
- zip = zipfile.ZipFile(zip_filename, "w",
- compression=zipfile.ZIP_DEFLATED)
-
- for dirpath, dirnames, filenames in os.walk(base_dir):
- for name in filenames:
- path = os.path.normpath(os.path.join(dirpath, name))
- if os.path.isfile(path):
- zip.write(path, path)
- if logger is not None:
- logger.info("adding '%s'", path)
- zip.close()
-
- return zip_filename
-
-_ARCHIVE_FORMATS = {
- 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
- 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
- 'zip': (_make_zipfile, [], "ZIP file"),
- }
-# 'bztar' is registered below only when the bz2 module is available.
-
-if _BZ2_SUPPORTED:
- _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
- "bzip2'ed tar-file")
-
-def get_archive_formats():
- """Returns a list of supported formats for archiving and unarchiving.
-
- Each element of the returned sequence is a tuple (name, description)
- """
- formats = [(name, registry[2]) for name, registry in
- _ARCHIVE_FORMATS.items()]
- formats.sort()
- return formats
-
-def register_archive_format(name, function, extra_args=None, description=''):
- """Registers an archive format.
-
- name is the name of the format. function is the callable that will be
- used to create archives. If provided, extra_args is a sequence of
- (name, value) tuples that will be passed as arguments to the callable.
- description can be provided to describe the format, and will be returned
- by the get_archive_formats() function.
- """
- if extra_args is None:
- extra_args = []
- if not isinstance(function, collections.Callable):
- raise TypeError('The %s object is not callable' % function)
- if not isinstance(extra_args, (tuple, list)):
- raise TypeError('extra_args needs to be a sequence')
- for element in extra_args:
- if not isinstance(element, (tuple, list)) or len(element) != 2:
- raise TypeError('extra_args elements must be (arg_name, value) tuples')
-
- _ARCHIVE_FORMATS[name] = (function, extra_args, description)
-
-def unregister_archive_format(name):
- del _ARCHIVE_FORMATS[name]
-
-def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
- dry_run=0, owner=None, group=None, logger=None):
- """Create an archive file (eg. zip or tar).
-
- 'base_name' is the name of the file to create, minus any format-specific
- extension; 'format' is the archive format: one of "zip", "tar", "bztar"
- or "gztar".
-
- 'root_dir' is a directory that will be the root directory of the
- archive; ie. we typically chdir into 'root_dir' before creating the
- archive. 'base_dir' is the directory where we start archiving from;
- ie. 'base_dir' will be the common prefix of all files and
- directories in the archive. 'root_dir' and 'base_dir' both default
- to the current directory. Returns the name of the archive file.
-
- 'owner' and 'group' are used when creating a tar archive. By default,
- uses the current owner and group.
- """
- save_cwd = os.getcwd()
- if root_dir is not None:
- if logger is not None:
- logger.debug("changing into '%s'", root_dir)
- base_name = os.path.abspath(base_name)
- if not dry_run:
- os.chdir(root_dir)
-
- if base_dir is None:
- base_dir = os.curdir
-
- kwargs = {'dry_run': dry_run, 'logger': logger}
-
- try:
- format_info = _ARCHIVE_FORMATS[format]
- except KeyError:
- raise ValueError("unknown archive format '%s'" % format)
-
- func = format_info[0]
- for arg, val in format_info[1]:
- kwargs[arg] = val
-
- if format != 'zip':
- kwargs['owner'] = owner
- kwargs['group'] = group
-
- try:
- filename = func(base_name, base_dir, **kwargs)
- finally:
- if root_dir is not None:
- if logger is not None:
- logger.debug("changing back to '%s'", save_cwd)
- os.chdir(save_cwd)
-
- return filename
-
-
-def get_unpack_formats():
- """Returns a list of supported formats for unpacking.
-
- Each element of the returned sequence is a tuple
- (name, extensions, description)
- """
- formats = [(name, info[0], info[3]) for name, info in
- _UNPACK_FORMATS.items()]
- formats.sort()
- return formats
-
-def _check_unpack_options(extensions, function, extra_args):
- """Checks what gets registered as an unpacker."""
- # first make sure no other unpacker is registered for this extension
- existing_extensions = {}
- for name, info in _UNPACK_FORMATS.items():
- for ext in info[0]:
- existing_extensions[ext] = name
-
- for extension in extensions:
- if extension in existing_extensions:
- msg = '%s is already registered for "%s"'
- raise RegistryError(msg % (extension,
- existing_extensions[extension]))
-
- if not isinstance(function, collections.Callable):
- raise TypeError('The registered function must be a callable')
-
-
-def register_unpack_format(name, extensions, function, extra_args=None,
- description=''):
- """Registers an unpack format.
-
- `name` is the name of the format. `extensions` is a list of extensions
- corresponding to the format.
-
- `function` is the callable that will be
- used to unpack archives. The callable will receive archives to unpack.
- If it's unable to handle an archive, it needs to raise a ReadError
- exception.
-
- If provided, `extra_args` is a sequence of
- (name, value) tuples that will be passed as arguments to the callable.
- description can be provided to describe the format, and will be returned
- by the get_unpack_formats() function.
- """
- if extra_args is None:
- extra_args = []
- _check_unpack_options(extensions, function, extra_args)
- _UNPACK_FORMATS[name] = extensions, function, extra_args, description
-
-def unregister_unpack_format(name):
- """Removes the pack format from the registry."""
- del _UNPACK_FORMATS[name]
-
-def _ensure_directory(path):
- """Ensure that the parent directory of `path` exists"""
- dirname = os.path.dirname(path)
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
-
-def _unpack_zipfile(filename, extract_dir):
- """Unpack zip `filename` to `extract_dir`
- """
- try:
- import zipfile
- except ImportError:
- raise ReadError('zipfile module not available, cannot unpack this archive.')
-
- if not zipfile.is_zipfile(filename):
- raise ReadError("%s is not a zip file" % filename)
-
- zip = zipfile.ZipFile(filename)
- try:
- for info in zip.infolist():
- name = info.filename
-
- # don't extract absolute paths or ones with .. in them
- if name.startswith('/') or '..' in name:
- continue
-
- target = os.path.join(extract_dir, *name.split('/'))
- if not target:
- continue
-
- _ensure_directory(target)
- if not name.endswith('/'):
- # file
- data = zip.read(info.filename)
- f = open(target, 'wb')
- try:
- f.write(data)
- finally:
- f.close()
- del data
- finally:
- zip.close()
-
-def _unpack_tarfile(filename, extract_dir):
- """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
- """
- try:
- tarobj = tarfile.open(filename)
- except tarfile.TarError:
- raise ReadError(
- "%s is not a compressed or uncompressed tar file" % filename)
- try:
- tarobj.extractall(extract_dir)
- finally:
- tarobj.close()
-
-_UNPACK_FORMATS = {
- 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
- 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
- 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
- }
-
-if _BZ2_SUPPORTED:
- _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
- "bzip2'ed tar-file")
-
-def _find_unpack_format(filename):
- for name, info in _UNPACK_FORMATS.items():
- for extension in info[0]:
- if filename.endswith(extension):
- return name
- return None
-
-def unpack_archive(filename, extract_dir=None, format=None):
- """Unpack an archive.
-
- `filename` is the name of the archive.
-
- `extract_dir` is the name of the target directory, where the archive
- is unpacked. If not provided, the current working directory is used.
-
- `format` is the archive format: one of "zip", "tar", or "gztar". Or any
- other registered format. If not provided, unpack_archive will use the
- filename extension and see if an unpacker was registered for that
- extension.
-
- In case none is found, a ValueError is raised.
- """
- if extract_dir is None:
- extract_dir = os.getcwd()
-
- if format is not None:
- try:
- format_info = _UNPACK_FORMATS[format]
- except KeyError:
- raise ValueError("Unknown unpack format '{0}'".format(format))
-
- func = format_info[1]
- func(filename, extract_dir, **dict(format_info[2]))
- else:
- # we need to look at the registered unpackers supported extensions
- format = _find_unpack_format(filename)
- if format is None:
- raise ReadError("Unknown archive format '{0}'".format(filename))
-
- func = _UNPACK_FORMATS[format][1]
- kwargs = dict(_UNPACK_FORMATS[format][2])
- func(filename, extract_dir, **kwargs)
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg b/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg
deleted file mode 100644
index 1746bd0..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg
+++ /dev/null
@@ -1,84 +0,0 @@
-[posix_prefix]
-# Configuration directories. Some of these come straight out of the
-# configure script. They are for implementing the other variables, not to
-# be used directly in [resource_locations].
-confdir = /etc
-datadir = /usr/share
-libdir = /usr/lib
-statedir = /var
-# User resource directory
-local = ~/.local/{distribution.name}
-
-stdlib = {base}/lib/python{py_version_short}
-platstdlib = {platbase}/lib/python{py_version_short}
-purelib = {base}/lib/python{py_version_short}/site-packages
-platlib = {platbase}/lib/python{py_version_short}/site-packages
-include = {base}/include/python{py_version_short}{abiflags}
-platinclude = {platbase}/include/python{py_version_short}{abiflags}
-data = {base}
-
-[posix_home]
-stdlib = {base}/lib/python
-platstdlib = {base}/lib/python
-purelib = {base}/lib/python
-platlib = {base}/lib/python
-include = {base}/include/python
-platinclude = {base}/include/python
-scripts = {base}/bin
-data = {base}
-
-[nt]
-stdlib = {base}/Lib
-platstdlib = {base}/Lib
-purelib = {base}/Lib/site-packages
-platlib = {base}/Lib/site-packages
-include = {base}/Include
-platinclude = {base}/Include
-scripts = {base}/Scripts
-data = {base}
-
-[os2]
-stdlib = {base}/Lib
-platstdlib = {base}/Lib
-purelib = {base}/Lib/site-packages
-platlib = {base}/Lib/site-packages
-include = {base}/Include
-platinclude = {base}/Include
-scripts = {base}/Scripts
-data = {base}
-
-[os2_home]
-stdlib = {userbase}/lib/python{py_version_short}
-platstdlib = {userbase}/lib/python{py_version_short}
-purelib = {userbase}/lib/python{py_version_short}/site-packages
-platlib = {userbase}/lib/python{py_version_short}/site-packages
-include = {userbase}/include/python{py_version_short}
-scripts = {userbase}/bin
-data = {userbase}
-
-[nt_user]
-stdlib = {userbase}/Python{py_version_nodot}
-platstdlib = {userbase}/Python{py_version_nodot}
-purelib = {userbase}/Python{py_version_nodot}/site-packages
-platlib = {userbase}/Python{py_version_nodot}/site-packages
-include = {userbase}/Python{py_version_nodot}/Include
-scripts = {userbase}/Scripts
-data = {userbase}
-
-[posix_user]
-stdlib = {userbase}/lib/python{py_version_short}
-platstdlib = {userbase}/lib/python{py_version_short}
-purelib = {userbase}/lib/python{py_version_short}/site-packages
-platlib = {userbase}/lib/python{py_version_short}/site-packages
-include = {userbase}/include/python{py_version_short}
-scripts = {userbase}/bin
-data = {userbase}
-
-[osx_framework_user]
-stdlib = {userbase}/lib/python
-platstdlib = {userbase}/lib/python
-purelib = {userbase}/lib/python/site-packages
-platlib = {userbase}/lib/python/site-packages
-include = {userbase}/include
-scripts = {userbase}/bin
-data = {userbase}
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
deleted file mode 100644
index ec28480..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
+++ /dev/null
@@ -1,788 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012 The Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""Access to Python's configuration information."""
-
-import codecs
-import os
-import re
-import sys
-from os.path import pardir, realpath
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
-
-__all__ = [
- 'get_config_h_filename',
- 'get_config_var',
- 'get_config_vars',
- 'get_makefile_filename',
- 'get_path',
- 'get_path_names',
- 'get_paths',
- 'get_platform',
- 'get_python_version',
- 'get_scheme_names',
- 'parse_config_h',
-]
-
-
-def _safe_realpath(path):
- try:
- return realpath(path)
- except OSError:
- return path
-
-
-if sys.executable:
- _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
-else:
- # sys.executable can be empty if argv[0] has been changed and Python is
- # unable to retrieve the real program name
- _PROJECT_BASE = _safe_realpath(os.getcwd())
-
-if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
- _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
-# PC/VS7.1
-if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
- _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
-# PC/AMD64
-if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
- _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
-
-
-def is_python_build():
- for fn in ("Setup.dist", "Setup.local"):
- if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
- return True
- return False
-
-_PYTHON_BUILD = is_python_build()
-
-_cfg_read = False
-
-def _ensure_cfg_read():
- global _cfg_read
- if not _cfg_read:
- from ..resources import finder
- backport_package = __name__.rsplit('.', 1)[0]
- _finder = finder(backport_package)
- _cfgfile = _finder.find('sysconfig.cfg')
- assert _cfgfile, 'sysconfig.cfg not found'
- with _cfgfile.as_stream() as s:
- _SCHEMES.readfp(s)
- if _PYTHON_BUILD:
- for scheme in ('posix_prefix', 'posix_home'):
- _SCHEMES.set(scheme, 'include', '{srcdir}/Include')
- _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
-
- _cfg_read = True
-
-
-_SCHEMES = configparser.RawConfigParser()
-_VAR_REPL = re.compile(r'\{([^{]*?)\}')
-
-def _expand_globals(config):
- _ensure_cfg_read()
- if config.has_section('globals'):
- globals = config.items('globals')
- else:
- globals = tuple()
-
- sections = config.sections()
- for section in sections:
- if section == 'globals':
- continue
- for option, value in globals:
- if config.has_option(section, option):
- continue
- config.set(section, option, value)
- config.remove_section('globals')
-
- # now expanding local variables defined in the cfg file
- #
- for section in config.sections():
- variables = dict(config.items(section))
-
- def _replacer(matchobj):
- name = matchobj.group(1)
- if name in variables:
- return variables[name]
- return matchobj.group(0)
-
- for option, value in config.items(section):
- config.set(section, option, _VAR_REPL.sub(_replacer, value))
-
-#_expand_globals(_SCHEMES)
-
-# FIXME: don't rely on sys.version here; its format is an implementation
-# detail of CPython. Use sys.version_info or sys.hexversion instead.
-_PY_VERSION = sys.version.split()[0]
-_PY_VERSION_SHORT = sys.version[:3]
-_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
-_PREFIX = os.path.normpath(sys.prefix)
-_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
-_CONFIG_VARS = None
-_USER_BASE = None
-
-
-def _subst_vars(path, local_vars):
- """In the string `path`, replace tokens like {some.thing} with the
- corresponding value from the map `local_vars`.
-
- If there is no corresponding value, leave the token unchanged.
- """
- def _replacer(matchobj):
- name = matchobj.group(1)
- if name in local_vars:
- return local_vars[name]
- elif name in os.environ:
- return os.environ[name]
- return matchobj.group(0)
- return _VAR_REPL.sub(_replacer, path)
-
-
-def _extend_dict(target_dict, other_dict):
- target_keys = target_dict.keys()
- for key, value in other_dict.items():
- if key in target_keys:
- continue
- target_dict[key] = value
-
-
-def _expand_vars(scheme, vars):
- res = {}
- if vars is None:
- vars = {}
- _extend_dict(vars, get_config_vars())
-
- for key, value in _SCHEMES.items(scheme):
- if os.name in ('posix', 'nt'):
- value = os.path.expanduser(value)
- res[key] = os.path.normpath(_subst_vars(value, vars))
- return res
-
-
-def format_value(value, vars):
- def _replacer(matchobj):
- name = matchobj.group(1)
- if name in vars:
- return vars[name]
- return matchobj.group(0)
- return _VAR_REPL.sub(_replacer, value)
-
-
-def _get_default_scheme():
- if os.name == 'posix':
- # the default scheme for posix is posix_prefix
- return 'posix_prefix'
- return os.name
-
-
-def _getuserbase():
- env_base = os.environ.get("PYTHONUSERBASE", None)
-
- def joinuser(*args):
- return os.path.expanduser(os.path.join(*args))
-
- # what about 'os2emx', 'riscos' ?
- if os.name == "nt":
- base = os.environ.get("APPDATA") or "~"
- if env_base:
- return env_base
- else:
- return joinuser(base, "Python")
-
- if sys.platform == "darwin":
- framework = get_config_var("PYTHONFRAMEWORK")
- if framework:
- if env_base:
- return env_base
- else:
- return joinuser("~", "Library", framework, "%d.%d" %
- sys.version_info[:2])
-
- if env_base:
- return env_base
- else:
- return joinuser("~", ".local")
-
-
-def _parse_makefile(filename, vars=None):
- """Parse a Makefile-style file.
-
- A dictionary containing name/value pairs is returned. If an
- optional dictionary is passed in as the second argument, it is
- used instead of a new dictionary.
- """
- # Regexes needed for parsing Makefile (and similar syntaxes,
- # like old-style Setup files).
- _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
- _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
- _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
-
- if vars is None:
- vars = {}
- done = {}
- notdone = {}
-
- with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
- lines = f.readlines()
-
- for line in lines:
- if line.startswith('#') or line.strip() == '':
- continue
- m = _variable_rx.match(line)
- if m:
- n, v = m.group(1, 2)
- v = v.strip()
- # `$$' is a literal `$' in make
- tmpv = v.replace('$$', '')
-
- if "$" in tmpv:
- notdone[n] = v
- else:
- try:
- v = int(v)
- except ValueError:
- # insert literal `$'
- done[n] = v.replace('$$', '$')
- else:
- done[n] = v
-
- # do variable interpolation here
- variables = list(notdone.keys())
-
- # Variables with a 'PY_' prefix in the makefile. These need to
- # be made available without that prefix through sysconfig.
- # Special care is needed to ensure that variable expansion works, even
- # if the expansion uses the name without a prefix.
- renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
-
- while len(variables) > 0:
- for name in tuple(variables):
- value = notdone[name]
- m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
- if m is not None:
- n = m.group(1)
- found = True
- if n in done:
- item = str(done[n])
- elif n in notdone:
- # get it on a subsequent round
- found = False
- elif n in os.environ:
- # do it like make: fall back to environment
- item = os.environ[n]
-
- elif n in renamed_variables:
- if (name.startswith('PY_') and
- name[3:] in renamed_variables):
- item = ""
-
- elif 'PY_' + n in notdone:
- found = False
-
- else:
- item = str(done['PY_' + n])
-
- else:
- done[n] = item = ""
-
- if found:
- after = value[m.end():]
- value = value[:m.start()] + item + after
- if "$" in after:
- notdone[name] = value
- else:
- try:
- value = int(value)
- except ValueError:
- done[name] = value.strip()
- else:
- done[name] = value
- variables.remove(name)
-
- if (name.startswith('PY_') and
- name[3:] in renamed_variables):
-
- name = name[3:]
- if name not in done:
- done[name] = value
-
- else:
- # bogus variable reference (e.g. "prefix=$/opt/python");
- # just drop it since we can't deal
- done[name] = value
- variables.remove(name)
-
- # strip spurious spaces
- for k, v in done.items():
- if isinstance(v, str):
- done[k] = v.strip()
-
- # save the results in the global dictionary
- vars.update(done)
- return vars
-
-
-def get_makefile_filename():
- """Return the path of the Makefile."""
- if _PYTHON_BUILD:
- return os.path.join(_PROJECT_BASE, "Makefile")
- if hasattr(sys, 'abiflags'):
- config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
- else:
- config_dir_name = 'config'
- return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
-
-
-def _init_posix(vars):
- """Initialize the module as appropriate for POSIX systems."""
- # load the installed Makefile:
- makefile = get_makefile_filename()
- try:
- _parse_makefile(makefile, vars)
- except IOError as e:
- msg = "invalid Python installation: unable to open %s" % makefile
- if hasattr(e, "strerror"):
- msg = msg + " (%s)" % e.strerror
- raise IOError(msg)
- # load the installed pyconfig.h:
- config_h = get_config_h_filename()
- try:
- with open(config_h) as f:
- parse_config_h(f, vars)
- except IOError as e:
- msg = "invalid Python installation: unable to open %s" % config_h
- if hasattr(e, "strerror"):
- msg = msg + " (%s)" % e.strerror
- raise IOError(msg)
- # On AIX, there are wrong paths to the linker scripts in the Makefile
- # -- these paths are relative to the Python source, but when installed
- # the scripts are in another directory.
- if _PYTHON_BUILD:
- vars['LDSHARED'] = vars['BLDSHARED']
-
-
-def _init_non_posix(vars):
- """Initialize the module as appropriate for NT"""
- # set basic install directories
- vars['LIBDEST'] = get_path('stdlib')
- vars['BINLIBDEST'] = get_path('platstdlib')
- vars['INCLUDEPY'] = get_path('include')
- vars['SO'] = '.pyd'
- vars['EXE'] = '.exe'
- vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
- vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
-
-#
-# public APIs
-#
-
-
-def parse_config_h(fp, vars=None):
- """Parse a config.h-style file.
-
- A dictionary containing name/value pairs is returned. If an
- optional dictionary is passed in as the second argument, it is
- used instead of a new dictionary.
- """
- if vars is None:
- vars = {}
- define_rx = re.compile(r"#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
- undef_rx = re.compile(r"/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
-
- while True:
- line = fp.readline()
- if not line:
- break
- m = define_rx.match(line)
- if m:
- n, v = m.group(1, 2)
- try:
- v = int(v)
- except ValueError:
- pass
- vars[n] = v
- else:
- m = undef_rx.match(line)
- if m:
- vars[m.group(1)] = 0
- return vars
-
-
-def get_config_h_filename():
- """Return the path of pyconfig.h."""
- if _PYTHON_BUILD:
- if os.name == "nt":
- inc_dir = os.path.join(_PROJECT_BASE, "PC")
- else:
- inc_dir = _PROJECT_BASE
- else:
- inc_dir = get_path('platinclude')
- return os.path.join(inc_dir, 'pyconfig.h')
-
-
-def get_scheme_names():
- """Return a tuple containing the schemes names."""
- return tuple(sorted(_SCHEMES.sections()))
-
-
-def get_path_names():
- """Return a tuple containing the paths names."""
- # xxx see if we want a static list
- return _SCHEMES.options('posix_prefix')
-
-
-def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
- """Return a mapping containing an install scheme.
-
- ``scheme`` is the install scheme name. If not provided, it will
- return the default scheme for the current platform.
- """
- _ensure_cfg_read()
- if expand:
- return _expand_vars(scheme, vars)
- else:
- return dict(_SCHEMES.items(scheme))
-
-
-def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
- """Return a path corresponding to the scheme.
-
- ``scheme`` is the install scheme name.
- """
- return get_paths(scheme, vars, expand)[name]
-
-
-def get_config_vars(*args):
- """With no arguments, return a dictionary of all configuration
- variables relevant for the current platform.
-
- On Unix, this means every variable defined in Python's installed Makefile;
- On Windows and Mac OS it's a much smaller set.
-
- With arguments, return a list of values that result from looking up
- each argument in the configuration variable dictionary.
- """
- global _CONFIG_VARS
- if _CONFIG_VARS is None:
- _CONFIG_VARS = {}
- # Normalized versions of prefix and exec_prefix are handy to have;
- # in fact, these are the standard versions used most places in the
- # distutils2 module.
- _CONFIG_VARS['prefix'] = _PREFIX
- _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
- _CONFIG_VARS['py_version'] = _PY_VERSION
- _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
- _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
- _CONFIG_VARS['base'] = _PREFIX
- _CONFIG_VARS['platbase'] = _EXEC_PREFIX
- _CONFIG_VARS['projectbase'] = _PROJECT_BASE
- try:
- _CONFIG_VARS['abiflags'] = sys.abiflags
- except AttributeError:
- # sys.abiflags may not be defined on all platforms.
- _CONFIG_VARS['abiflags'] = ''
-
- if os.name in ('nt', 'os2'):
- _init_non_posix(_CONFIG_VARS)
- if os.name == 'posix':
- _init_posix(_CONFIG_VARS)
- # Setting 'userbase' is done below the call to the
- # init function to enable using 'get_config_var' in
- # the init-function.
- if sys.version >= '2.6':
- _CONFIG_VARS['userbase'] = _getuserbase()
-
- if 'srcdir' not in _CONFIG_VARS:
- _CONFIG_VARS['srcdir'] = _PROJECT_BASE
- else:
- _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
-
- # Convert srcdir into an absolute path if it appears necessary.
- # Normally it is relative to the build directory. However, during
- # testing, for example, we might be running a non-installed python
- # from a different directory.
- if _PYTHON_BUILD and os.name == "posix":
- base = _PROJECT_BASE
- try:
- cwd = os.getcwd()
- except OSError:
- cwd = None
- if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
- base != cwd):
- # srcdir is relative and we are not in the same directory
- # as the executable. Assume executable is in the build
- # directory and make srcdir absolute.
- srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
- _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
-
- if sys.platform == 'darwin':
- kernel_version = os.uname()[2] # Kernel version (8.4.3)
- major_version = int(kernel_version.split('.')[0])
-
- if major_version < 8:
- # On macOS before 10.4, check if -arch and -isysroot
- # are in CFLAGS or LDFLAGS and remove them if they are.
- # This is needed when building extensions on a 10.3 system
- # using a universal build of python.
- for key in ('LDFLAGS', 'BASECFLAGS',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
- flags = _CONFIG_VARS[key]
- flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
- flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
- _CONFIG_VARS[key] = flags
- else:
- # Allow the user to override the architecture flags using
- # an environment variable.
- # NOTE: This name was introduced by Apple in OSX 10.5 and
- # is used by several scripting languages distributed with
- # that OS release.
- if 'ARCHFLAGS' in os.environ:
- arch = os.environ['ARCHFLAGS']
- for key in ('LDFLAGS', 'BASECFLAGS',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
-
- flags = _CONFIG_VARS[key]
- flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
- flags = flags + ' ' + arch
- _CONFIG_VARS[key] = flags
-
- # If we're on OSX 10.5 or later and the user tries to
- # compiles an extension using an SDK that is not present
- # on the current machine it is better to not use an SDK
- # than to fail.
- #
- # The major usecase for this is users using a Python.org
- # binary installer on OSX 10.6: that installer uses
- # the 10.4u SDK, but that SDK is not installed by default
- # when you install Xcode.
- #
- CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
- m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
- if m is not None:
- sdk = m.group(1)
- if not os.path.exists(sdk):
- for key in ('LDFLAGS', 'BASECFLAGS',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
-
- flags = _CONFIG_VARS[key]
- flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
- _CONFIG_VARS[key] = flags
-
- if args:
- vals = []
- for name in args:
- vals.append(_CONFIG_VARS.get(name))
- return vals
- else:
- return _CONFIG_VARS
-
-
-def get_config_var(name):
- """Return the value of a single variable using the dictionary returned by
- 'get_config_vars()'.
-
- Equivalent to get_config_vars().get(name)
- """
- return get_config_vars().get(name)
-
-
-def get_platform():
- """Return a string that identifies the current platform.
-
- This is used mainly to distinguish platform-specific build directories and
- platform-specific built distributions. Typically includes the OS name
- and version and the architecture (as supplied by 'os.uname()'),
- although the exact information included depends on the OS; eg. for IRIX
- the architecture isn't particularly important (IRIX only runs on SGI
- hardware), but for Linux the kernel version isn't particularly
- important.
-
- Examples of returned values:
- linux-i586
- linux-alpha (?)
- solaris-2.6-sun4u
- irix-5.3
- irix64-6.2
-
- Windows will return one of:
- win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
- win-ia64 (64bit Windows on Itanium)
- win32 (all others - specifically, sys.platform is returned)
-
- For other non-POSIX platforms, currently just returns 'sys.platform'.
- """
- if os.name == 'nt':
- # sniff sys.version for architecture.
- prefix = " bit ("
- i = sys.version.find(prefix)
- if i == -1:
- return sys.platform
- j = sys.version.find(")", i)
- look = sys.version[i+len(prefix):j].lower()
- if look == 'amd64':
- return 'win-amd64'
- if look == 'itanium':
- return 'win-ia64'
- return sys.platform
-
- if os.name != "posix" or not hasattr(os, 'uname'):
- # XXX what about the architecture? NT is Intel or Alpha,
- # Mac OS is M68k or PPC, etc.
- return sys.platform
-
- # Try to distinguish various flavours of Unix
- osname, host, release, version, machine = os.uname()
-
- # Convert the OS name to lowercase, remove '/' characters
- # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
- osname = osname.lower().replace('/', '')
- machine = machine.replace(' ', '_')
- machine = machine.replace('/', '-')
-
- if osname[:5] == "linux":
- # At least on Linux/Intel, 'machine' is the processor --
- # i386, etc.
- # XXX what about Alpha, SPARC, etc?
- return "%s-%s" % (osname, machine)
- elif osname[:5] == "sunos":
- if release[0] >= "5": # SunOS 5 == Solaris 2
- osname = "solaris"
- release = "%d.%s" % (int(release[0]) - 3, release[2:])
- # fall through to standard osname-release-machine representation
- elif osname[:4] == "irix": # could be "irix64"!
- return "%s-%s" % (osname, release)
- elif osname[:3] == "aix":
- return "%s-%s.%s" % (osname, version, release)
- elif osname[:6] == "cygwin":
- osname = "cygwin"
- rel_re = re.compile(r'[\d.]+')
- m = rel_re.match(release)
- if m:
- release = m.group()
- elif osname[:6] == "darwin":
- #
- # For our purposes, we'll assume that the system version from
- # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
- # to. This makes the compatibility story a bit more sane because the
- # machine is going to compile and link as if it were
- # MACOSX_DEPLOYMENT_TARGET.
- cfgvars = get_config_vars()
- macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
-
- if True:
- # Always calculate the release of the running machine,
- # needed to determine if we can build fat binaries or not.
-
- macrelease = macver
- # Get the system version. Reading this plist is a documented
- # way to get the system version (see the documentation for
- # the Gestalt Manager)
- try:
- f = open('/System/Library/CoreServices/SystemVersion.plist')
- except IOError:
- # We're on a plain darwin box, fall back to the default
- # behaviour.
- pass
- else:
- try:
-                    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
-                                  r'<string>(.*?)</string>', f.read())
- finally:
- f.close()
- if m is not None:
- macrelease = '.'.join(m.group(1).split('.')[:2])
- # else: fall back to the default behaviour
-
- if not macver:
- macver = macrelease
-
- if macver:
- release = macver
- osname = "macosx"
-
- if ((macrelease + '.') >= '10.4.' and
- '-arch' in get_config_vars().get('CFLAGS', '').strip()):
- # The universal build will build fat binaries, but not on
- # systems before 10.4
- #
- # Try to detect 4-way universal builds, those have machine-type
- # 'universal' instead of 'fat'.
-
- machine = 'fat'
- cflags = get_config_vars().get('CFLAGS')
-
- archs = re.findall('-arch\s+(\S+)', cflags)
- archs = tuple(sorted(set(archs)))
-
- if len(archs) == 1:
- machine = archs[0]
- elif archs == ('i386', 'ppc'):
- machine = 'fat'
- elif archs == ('i386', 'x86_64'):
- machine = 'intel'
- elif archs == ('i386', 'ppc', 'x86_64'):
- machine = 'fat3'
- elif archs == ('ppc64', 'x86_64'):
- machine = 'fat64'
- elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
- machine = 'universal'
- else:
- raise ValueError(
- "Don't know machine value for archs=%r" % (archs,))
-
- elif machine == 'i386':
- # On OSX the machine type returned by uname is always the
- # 32-bit variant, even if the executable architecture is
- # the 64-bit variant
- if sys.maxsize >= 2**32:
- machine = 'x86_64'
-
- elif machine in ('PowerPC', 'Power_Macintosh'):
- # Pick a sane name for the PPC architecture.
- # See 'i386' case
- if sys.maxsize >= 2**32:
- machine = 'ppc64'
- else:
- machine = 'ppc'
-
- return "%s-%s-%s" % (osname, release, machine)
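-
-# For orientation, two concrete outcomes of get_platform() (host-dependent,
-# illustrative only): a 64-bit Linux/x86_64 machine gives "linux-x86_64",
-# and a universal-intel macOS build may give e.g. "macosx-10.6-intel".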
-
-
-def get_python_version():
- return _PY_VERSION_SHORT
-
-
-def _print_dict(title, data):
- for index, (key, value) in enumerate(sorted(data.items())):
- if index == 0:
- print('%s: ' % (title))
- print('\t%s = "%s"' % (key, value))
-
-
-def _main():
-    """Display all information sysconfig contains."""
- print('Platform: "%s"' % get_platform())
- print('Python version: "%s"' % get_python_version())
- print('Current installation scheme: "%s"' % _get_default_scheme())
- print()
- _print_dict('Paths', get_paths())
- print()
- _print_dict('Variables', get_config_vars())
-
-
-if __name__ == '__main__':
- _main()
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py b/env/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py
deleted file mode 100644
index d66d856..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py
+++ /dev/null
@@ -1,2607 +0,0 @@
-#-------------------------------------------------------------------
-# tarfile.py
-#-------------------------------------------------------------------
-# Copyright (C) 2002 Lars Gustaebel
-# All rights reserved.
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation
-# files (the "Software"), to deal in the Software without
-# restriction, including without limitation the rights to use,
-# copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following
-# conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-from __future__ import print_function
-
-"""Read from and write to tar format archives.
-"""
-
-__version__ = "$Revision$"
-
-version = "0.9.0"
-__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
-__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
-__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
-__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
-
-#---------
-# Imports
-#---------
-import sys
-import os
-import stat
-import errno
-import time
-import struct
-import copy
-import re
-
-try:
- import grp, pwd
-except ImportError:
- grp = pwd = None
-
-# os.symlink on Windows prior to 6.0 raises NotImplementedError
-symlink_exception = (AttributeError, NotImplementedError)
-try:
- # WindowsError (1314) will be raised if the caller does not hold the
- # SeCreateSymbolicLinkPrivilege privilege
- symlink_exception += (WindowsError,)
-except NameError:
- pass
-
-# from tarfile import *
-__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
-
-if sys.version_info[0] < 3:
- import __builtin__ as builtins
-else:
- import builtins
-
-_open = builtins.open # Since 'open' is TarFile.open
-
-#---------------------------------------------------------
-# tar constants
-#---------------------------------------------------------
-NUL = b"\0" # the null character
-BLOCKSIZE = 512 # length of processing blocks
-RECORDSIZE = BLOCKSIZE * 20 # length of records
-GNU_MAGIC = b"ustar \0" # magic gnu tar string
-POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
-
-LENGTH_NAME = 100 # maximum length of a filename
-LENGTH_LINK = 100 # maximum length of a linkname
-LENGTH_PREFIX = 155 # maximum length of the prefix field
-
-REGTYPE = b"0" # regular file
-AREGTYPE = b"\0" # regular file
-LNKTYPE = b"1" # link (inside tarfile)
-SYMTYPE = b"2" # symbolic link
-CHRTYPE = b"3" # character special device
-BLKTYPE = b"4" # block special device
-DIRTYPE = b"5" # directory
-FIFOTYPE = b"6" # fifo special device
-CONTTYPE = b"7" # contiguous file
-
-GNUTYPE_LONGNAME = b"L" # GNU tar longname
-GNUTYPE_LONGLINK = b"K" # GNU tar longlink
-GNUTYPE_SPARSE = b"S" # GNU tar sparse file
-
-XHDTYPE = b"x" # POSIX.1-2001 extended header
-XGLTYPE = b"g" # POSIX.1-2001 global header
-SOLARIS_XHDTYPE = b"X" # Solaris extended header
-
-USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
-GNU_FORMAT = 1 # GNU tar format
-PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
-DEFAULT_FORMAT = GNU_FORMAT
-
-#---------------------------------------------------------
-# tarfile constants
-#---------------------------------------------------------
-# File types that tarfile supports:
-SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
- SYMTYPE, DIRTYPE, FIFOTYPE,
- CONTTYPE, CHRTYPE, BLKTYPE,
- GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
- GNUTYPE_SPARSE)
-
-# File types that will be treated as a regular file.
-REGULAR_TYPES = (REGTYPE, AREGTYPE,
- CONTTYPE, GNUTYPE_SPARSE)
-
-# File types that are part of the GNU tar format.
-GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
- GNUTYPE_SPARSE)
-
-# Fields from a pax header that override a TarInfo attribute.
-PAX_FIELDS = ("path", "linkpath", "size", "mtime",
- "uid", "gid", "uname", "gname")
-
-# Fields from a pax header that are affected by hdrcharset.
-PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
-
-# Fields in a pax header that are numbers, all other fields
-# are treated as strings.
-PAX_NUMBER_FIELDS = {
- "atime": float,
- "ctime": float,
- "mtime": float,
- "uid": int,
- "gid": int,
- "size": int
-}
-
-#---------------------------------------------------------
-# Bits used in the mode field, values in octal.
-#---------------------------------------------------------
-S_IFLNK = 0o120000 # symbolic link
-S_IFREG = 0o100000 # regular file
-S_IFBLK = 0o060000 # block device
-S_IFDIR = 0o040000 # directory
-S_IFCHR = 0o020000 # character device
-S_IFIFO = 0o010000 # fifo
-
-TSUID = 0o4000 # set UID on execution
-TSGID = 0o2000 # set GID on execution
-TSVTX = 0o1000 # reserved
-
-TUREAD = 0o400 # read by owner
-TUWRITE = 0o200 # write by owner
-TUEXEC = 0o100 # execute/search by owner
-TGREAD = 0o040 # read by group
-TGWRITE = 0o020 # write by group
-TGEXEC = 0o010 # execute/search by group
-TOREAD = 0o004 # read by other
-TOWRITE = 0o002 # write by other
-TOEXEC = 0o001 # execute/search by other
-
-#---------------------------------------------------------
-# initialization
-#---------------------------------------------------------
-if os.name in ("nt", "ce"):
- ENCODING = "utf-8"
-else:
- ENCODING = sys.getfilesystemencoding()
-
-#---------------------------------------------------------
-# Some useful functions
-#---------------------------------------------------------
-
-def stn(s, length, encoding, errors):
- """Convert a string to a null-terminated bytes object.
- """
- s = s.encode(encoding, errors)
- return s[:length] + (length - len(s)) * NUL
-
-def nts(s, encoding, errors):
- """Convert a null-terminated bytes object to a string.
- """
- p = s.find(b"\0")
- if p != -1:
- s = s[:p]
- return s.decode(encoding, errors)
-
-def nti(s):
- """Convert a number field to a python number.
- """
- # There are two possible encodings for a number field, see
- # itn() below.
- if s[0] != chr(0o200):
- try:
- n = int(nts(s, "ascii", "strict") or "0", 8)
- except ValueError:
- raise InvalidHeaderError("invalid header")
- else:
- n = 0
- for i in range(len(s) - 1):
- n <<= 8
- n += ord(s[i + 1])
- return n
-
-def itn(n, digits=8, format=DEFAULT_FORMAT):
- """Convert a python number to a number field.
- """
- # POSIX 1003.1-1988 requires numbers to be encoded as a string of
- # octal digits followed by a null-byte, this allows values up to
- # (8**(digits-1))-1. GNU tar allows storing numbers greater than
- # that if necessary. A leading 0o200 byte indicates this particular
- # encoding, the following digits-1 bytes are a big-endian
- # representation. This allows values up to (256**(digits-1))-1.
- if 0 <= n < 8 ** (digits - 1):
- s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
- else:
- if format != GNU_FORMAT or n >= 256 ** (digits - 1):
- raise ValueError("overflow in number field")
-
- if n < 0:
- # XXX We mimic GNU tar's behaviour with negative numbers,
- # this could raise OverflowError.
- n = struct.unpack("L", struct.pack("l", n))[0]
-
- s = bytearray()
- for i in range(digits - 1):
- s.insert(0, n & 0o377)
- n >>= 8
- s.insert(0, 0o200)
- return s
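-
-# Illustrative round trip for the two encodings above: itn(0o755) gives
-# b"0000755\x00" (seven octal digits plus NUL) and nti() turns it back
-# into 493, while itn(8**7) overflows the octal form and falls back to
-# the GNU base-256 encoding: a leading 0o200 byte followed by seven
-# big-endian payload bytes.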
-
-def calc_chksums(buf):
- """Calculate the checksum for a member's header by summing up all
- characters except for the chksum field which is treated as if
- it was filled with spaces. According to the GNU tar sources,
- some tars (Sun and NeXT) calculate chksum with signed char,
- which will be different if there are chars in the buffer with
- the high bit set. So we calculate two checksums, unsigned and
- signed.
- """
- unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
- signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
- return unsigned_chksum, signed_chksum
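-
-# Why 256? The chksum field (buf[148:156]) is summed as if it held eight
-# ASCII spaces, and 8 * 0x20 == 256, so both sums start from 256 and skip
-# those eight bytes entirely.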
-
-def copyfileobj(src, dst, length=None):
- """Copy length bytes from fileobj src to fileobj dst.
- If length is None, copy the entire content.
- """
- if length == 0:
- return
- if length is None:
- while True:
- buf = src.read(16*1024)
- if not buf:
- break
- dst.write(buf)
- return
-
- BUFSIZE = 16 * 1024
- blocks, remainder = divmod(length, BUFSIZE)
- for b in range(blocks):
- buf = src.read(BUFSIZE)
- if len(buf) < BUFSIZE:
- raise IOError("end of file reached")
- dst.write(buf)
-
- if remainder != 0:
- buf = src.read(remainder)
- if len(buf) < remainder:
- raise IOError("end of file reached")
- dst.write(buf)
- return
-
-filemode_table = (
- ((S_IFLNK, "l"),
- (S_IFREG, "-"),
- (S_IFBLK, "b"),
- (S_IFDIR, "d"),
- (S_IFCHR, "c"),
- (S_IFIFO, "p")),
-
- ((TUREAD, "r"),),
- ((TUWRITE, "w"),),
- ((TUEXEC|TSUID, "s"),
- (TSUID, "S"),
- (TUEXEC, "x")),
-
- ((TGREAD, "r"),),
- ((TGWRITE, "w"),),
- ((TGEXEC|TSGID, "s"),
- (TSGID, "S"),
- (TGEXEC, "x")),
-
- ((TOREAD, "r"),),
- ((TOWRITE, "w"),),
- ((TOEXEC|TSVTX, "t"),
- (TSVTX, "T"),
- (TOEXEC, "x"))
-)
-
-def filemode(mode):
- """Convert a file's mode to a string of the form
- -rwxrwxrwx.
- Used by TarFile.list()
- """
- perm = []
- for table in filemode_table:
- for bit, char in table:
- if mode & bit == bit:
- perm.append(char)
- break
- else:
- perm.append("-")
- return "".join(perm)
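-
-# Sample translations (illustrative): filemode(0o100644) -> "-rw-r--r--",
-# filemode(0o040755) -> "drwxr-xr-x", filemode(0o104755) -> "-rwsr-xr-x".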
-
-class TarError(Exception):
- """Base exception."""
- pass
-class ExtractError(TarError):
- """General exception for extract errors."""
- pass
-class ReadError(TarError):
- """Exception for unreadable tar archives."""
- pass
-class CompressionError(TarError):
- """Exception for unavailable compression methods."""
- pass
-class StreamError(TarError):
- """Exception for unsupported operations on stream-like TarFiles."""
- pass
-class HeaderError(TarError):
- """Base exception for header errors."""
- pass
-class EmptyHeaderError(HeaderError):
- """Exception for empty headers."""
- pass
-class TruncatedHeaderError(HeaderError):
- """Exception for truncated headers."""
- pass
-class EOFHeaderError(HeaderError):
- """Exception for end of file headers."""
- pass
-class InvalidHeaderError(HeaderError):
- """Exception for invalid headers."""
- pass
-class SubsequentHeaderError(HeaderError):
- """Exception for missing and invalid extended headers."""
- pass
-
-#---------------------------
-# internal stream interface
-#---------------------------
-class _LowLevelFile(object):
- """Low-level file object. Supports reading and writing.
- It is used instead of a regular file object for streaming
- access.
- """
-
- def __init__(self, name, mode):
- mode = {
- "r": os.O_RDONLY,
- "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
- }[mode]
- if hasattr(os, "O_BINARY"):
- mode |= os.O_BINARY
- self.fd = os.open(name, mode, 0o666)
-
- def close(self):
- os.close(self.fd)
-
- def read(self, size):
- return os.read(self.fd, size)
-
- def write(self, s):
- os.write(self.fd, s)
-
-class _Stream(object):
- """Class that serves as an adapter between TarFile and
- a stream-like object. The stream-like object only
- needs to have a read() or write() method and is accessed
- blockwise. Use of gzip or bzip2 compression is possible.
- A stream-like object could be for example: sys.stdin,
- sys.stdout, a socket, a tape device etc.
-
- _Stream is intended to be used only internally.
- """
-
- def __init__(self, name, mode, comptype, fileobj, bufsize):
- """Construct a _Stream object.
- """
- self._extfileobj = True
- if fileobj is None:
- fileobj = _LowLevelFile(name, mode)
- self._extfileobj = False
-
- if comptype == '*':
- # Enable transparent compression detection for the
- # stream interface
- fileobj = _StreamProxy(fileobj)
- comptype = fileobj.getcomptype()
-
- self.name = name or ""
- self.mode = mode
- self.comptype = comptype
- self.fileobj = fileobj
- self.bufsize = bufsize
- self.buf = b""
- self.pos = 0
- self.closed = False
-
- try:
- if comptype == "gz":
- try:
- import zlib
- except ImportError:
- raise CompressionError("zlib module is not available")
- self.zlib = zlib
- self.crc = zlib.crc32(b"")
- if mode == "r":
- self._init_read_gz()
- else:
- self._init_write_gz()
-
- if comptype == "bz2":
- try:
- import bz2
- except ImportError:
- raise CompressionError("bz2 module is not available")
- if mode == "r":
- self.dbuf = b""
- self.cmp = bz2.BZ2Decompressor()
- else:
- self.cmp = bz2.BZ2Compressor()
- except:
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
- raise
-
- def __del__(self):
- if hasattr(self, "closed") and not self.closed:
- self.close()
-
- def _init_write_gz(self):
- """Initialize for writing with gzip compression.
- """
- self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
- -self.zlib.MAX_WBITS,
- self.zlib.DEF_MEM_LEVEL,
- 0)
-        timestamp = struct.pack("<L", int(time.time()))
-        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
-        if self.name.endswith(".gz"):
-            self.name = self.name[:-3]
-        # RFC1952 says we must use ISO-8859-1 for the FNAME field.
-        self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
-
-    def write(self, s):
-        """Write string s to the stream.
-        """
-        if self.comptype == "gz":
-            self.crc = self.zlib.crc32(s, self.crc)
-        self.pos += len(s)
-        if self.comptype != "tar":
-            s = self.cmp.compress(s)
-        self.__write(s)
-
-    def __write(self, s):
-        """Write string s to the stream if a whole new block
-        is ready to be written.
-        """
-        self.buf += s
-        while len(self.buf) > self.bufsize:
- self.fileobj.write(self.buf[:self.bufsize])
- self.buf = self.buf[self.bufsize:]
-
- def close(self):
- """Close the _Stream object. No operation should be
- done on it afterwards.
- """
- if self.closed:
- return
-
- if self.mode == "w" and self.comptype != "tar":
- self.buf += self.cmp.flush()
-
- if self.mode == "w" and self.buf:
- self.fileobj.write(self.buf)
- self.buf = b""
- if self.comptype == "gz":
- # The native zlib crc is an unsigned 32-bit integer, but
- # the Python wrapper implicitly casts that to a signed C
- # long. So, on a 32-bit box self.crc may "look negative",
- # while the same crc on a 64-bit box may "look positive".
- # To avoid irksome warnings from the `struct` module, force
- # it to look positive on all boxes.
-                self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
-                self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
-
-        if not self._extfileobj:
-            self.fileobj.close()
-
-        self.closed = True
-
-    def _init_read_gz(self):
-        """Initialize for reading a gzip compressed fileobj.
-        """
-        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
-        self.dbuf = b""
-
-        # taken from gzip.GzipFile with some alterations
-        if self.__read(2) != b"\037\213":
-            raise ReadError("not a gzip file")
-        if self.__read(1) != b"\010":
-            raise CompressionError("unsupported compression method")
-
-        flag = ord(self.__read(1))
-        self.__read(6)
-
-        if flag & 4:
-            xlen = ord(self.__read(1))
-            self.__read(xlen)
-        if flag & 8:
-            while True:
-                s = self.__read(1)
-                if not s or s == NUL:
-                    break
-        if flag & 16:
-            while True:
-                s = self.__read(1)
-                if not s or s == NUL:
-                    break
-        if flag & 2:
-            self.__read(2)
-
-    def tell(self):
-        """Return the stream's file pointer position.
-        """
-        return self.pos
-
-    def seek(self, pos=0):
-        """Set the stream's file pointer to pos. Negative seeking
-        is only allowed in read mode.
-        """
-        if pos - self.pos >= 0:
- blocks, remainder = divmod(pos - self.pos, self.bufsize)
- for i in range(blocks):
- self.read(self.bufsize)
- self.read(remainder)
- else:
- raise StreamError("seeking backwards is not allowed")
- return self.pos
-
- def read(self, size=None):
- """Return the next size number of bytes from the stream.
- If size is not defined, return all bytes of the stream
- up to EOF.
- """
- if size is None:
- t = []
- while True:
- buf = self._read(self.bufsize)
- if not buf:
- break
- t.append(buf)
-            buf = b"".join(t)
- else:
- buf = self._read(size)
- self.pos += len(buf)
- return buf
-
- def _read(self, size):
- """Return size bytes from the stream.
- """
- if self.comptype == "tar":
- return self.__read(size)
-
- c = len(self.dbuf)
- while c < size:
- buf = self.__read(self.bufsize)
- if not buf:
- break
- try:
- buf = self.cmp.decompress(buf)
- except IOError:
- raise ReadError("invalid compressed data")
- self.dbuf += buf
- c += len(buf)
- buf = self.dbuf[:size]
- self.dbuf = self.dbuf[size:]
- return buf
-
- def __read(self, size):
- """Return size bytes from stream. If internal buffer is empty,
- read another block from the stream.
- """
- c = len(self.buf)
- while c < size:
- buf = self.fileobj.read(self.bufsize)
- if not buf:
- break
- self.buf += buf
- c += len(buf)
- buf = self.buf[:size]
- self.buf = self.buf[size:]
- return buf
-# class _Stream
-
-class _StreamProxy(object):
- """Small proxy class that enables transparent compression
- detection for the Stream interface (mode 'r|*').
- """
-
- def __init__(self, fileobj):
- self.fileobj = fileobj
- self.buf = self.fileobj.read(BLOCKSIZE)
-
- def read(self, size):
- self.read = self.fileobj.read
- return self.buf
-
- def getcomptype(self):
- if self.buf.startswith(b"\037\213\010"):
- return "gz"
- if self.buf.startswith(b"BZh91"):
- return "bz2"
- return "tar"
-
- def close(self):
- self.fileobj.close()
-# class StreamProxy
-
-class _BZ2Proxy(object):
- """Small proxy class that enables external file object
- support for "r:bz2" and "w:bz2" modes. This is actually
- a workaround for a limitation in bz2 module's BZ2File
- class which (unlike gzip.GzipFile) has no support for
- a file object argument.
- """
-
- blocksize = 16 * 1024
-
- def __init__(self, fileobj, mode):
- self.fileobj = fileobj
- self.mode = mode
- self.name = getattr(self.fileobj, "name", None)
- self.init()
-
- def init(self):
- import bz2
- self.pos = 0
- if self.mode == "r":
- self.bz2obj = bz2.BZ2Decompressor()
- self.fileobj.seek(0)
- self.buf = b""
- else:
- self.bz2obj = bz2.BZ2Compressor()
-
- def read(self, size):
- x = len(self.buf)
- while x < size:
- raw = self.fileobj.read(self.blocksize)
- if not raw:
- break
- data = self.bz2obj.decompress(raw)
- self.buf += data
- x += len(data)
-
- buf = self.buf[:size]
- self.buf = self.buf[size:]
- self.pos += len(buf)
- return buf
-
- def seek(self, pos):
- if pos < self.pos:
- self.init()
- self.read(pos - self.pos)
-
- def tell(self):
- return self.pos
-
- def write(self, data):
- self.pos += len(data)
- raw = self.bz2obj.compress(data)
- self.fileobj.write(raw)
-
- def close(self):
- if self.mode == "w":
- raw = self.bz2obj.flush()
- self.fileobj.write(raw)
-# class _BZ2Proxy
-
-#------------------------
-# Extraction file object
-#------------------------
-class _FileInFile(object):
- """A thin wrapper around an existing file object that
- provides a part of its data as an individual file
- object.
- """
-
- def __init__(self, fileobj, offset, size, blockinfo=None):
- self.fileobj = fileobj
- self.offset = offset
- self.size = size
- self.position = 0
-
- if blockinfo is None:
- blockinfo = [(0, size)]
-
- # Construct a map with data and zero blocks.
- self.map_index = 0
- self.map = []
- lastpos = 0
- realpos = self.offset
- for offset, size in blockinfo:
- if offset > lastpos:
- self.map.append((False, lastpos, offset, None))
- self.map.append((True, offset, offset + size, realpos))
- realpos += size
- lastpos = offset + size
- if lastpos < self.size:
- self.map.append((False, lastpos, self.size, None))
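-        # To make the map concrete (hypothetical numbers): offset=0,
-        # size=35 and blockinfo=[(0, 10), (20, 10)] produce
-        # [(True, 0, 10, 0), (False, 10, 20, None), (True, 20, 30, 10),
-        # (False, 30, 35, None)]: data entries carry a real file position,
-        # holes read back as NUL bytes.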
-
- def seekable(self):
- if not hasattr(self.fileobj, "seekable"):
- # XXX gzip.GzipFile and bz2.BZ2File
- return True
- return self.fileobj.seekable()
-
- def tell(self):
- """Return the current file position.
- """
- return self.position
-
- def seek(self, position):
- """Seek to a position in the file.
- """
- self.position = position
-
- def read(self, size=None):
- """Read data from the file.
- """
- if size is None:
- size = self.size - self.position
- else:
- size = min(size, self.size - self.position)
-
- buf = b""
- while size > 0:
- while True:
- data, start, stop, offset = self.map[self.map_index]
- if start <= self.position < stop:
- break
- else:
- self.map_index += 1
- if self.map_index == len(self.map):
- self.map_index = 0
- length = min(size, stop - self.position)
- if data:
- self.fileobj.seek(offset + (self.position - start))
- buf += self.fileobj.read(length)
- else:
- buf += NUL * length
- size -= length
- self.position += length
- return buf
-#class _FileInFile
-
-
-class ExFileObject(object):
- """File-like object for reading an archive member.
- Is returned by TarFile.extractfile().
- """
- blocksize = 1024
-
- def __init__(self, tarfile, tarinfo):
- self.fileobj = _FileInFile(tarfile.fileobj,
- tarinfo.offset_data,
- tarinfo.size,
- tarinfo.sparse)
- self.name = tarinfo.name
- self.mode = "r"
- self.closed = False
- self.size = tarinfo.size
-
- self.position = 0
- self.buffer = b""
-
- def readable(self):
- return True
-
- def writable(self):
- return False
-
- def seekable(self):
- return self.fileobj.seekable()
-
- def read(self, size=None):
- """Read at most size bytes from the file. If size is not
- present or None, read all data until EOF is reached.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- buf = b""
- if self.buffer:
- if size is None:
- buf = self.buffer
- self.buffer = b""
- else:
- buf = self.buffer[:size]
- self.buffer = self.buffer[size:]
-
- if size is None:
- buf += self.fileobj.read()
- else:
- buf += self.fileobj.read(size - len(buf))
-
- self.position += len(buf)
- return buf
-
- # XXX TextIOWrapper uses the read1() method.
- read1 = read
-
- def readline(self, size=-1):
- """Read one entire line from the file. If size is present
- and non-negative, return a string with at most that
- size, which may be an incomplete line.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- pos = self.buffer.find(b"\n") + 1
- if pos == 0:
- # no newline found.
- while True:
- buf = self.fileobj.read(self.blocksize)
- self.buffer += buf
- if not buf or b"\n" in buf:
- pos = self.buffer.find(b"\n") + 1
- if pos == 0:
- # no newline found.
- pos = len(self.buffer)
- break
-
- if size != -1:
- pos = min(size, pos)
-
- buf = self.buffer[:pos]
- self.buffer = self.buffer[pos:]
- self.position += len(buf)
- return buf
-
- def readlines(self):
- """Return a list with all remaining lines.
- """
- result = []
- while True:
- line = self.readline()
- if not line: break
- result.append(line)
- return result
-
- def tell(self):
- """Return the current file position.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- return self.position
-
- def seek(self, pos, whence=os.SEEK_SET):
- """Seek to a position in the file.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- if whence == os.SEEK_SET:
- self.position = min(max(pos, 0), self.size)
- elif whence == os.SEEK_CUR:
- if pos < 0:
- self.position = max(self.position + pos, 0)
- else:
- self.position = min(self.position + pos, self.size)
- elif whence == os.SEEK_END:
- self.position = max(min(self.size + pos, self.size), 0)
- else:
- raise ValueError("Invalid argument")
-
- self.buffer = b""
- self.fileobj.seek(self.position)
-
- def close(self):
- """Close the file object.
- """
- self.closed = True
-
- def __iter__(self):
- """Get an iterator over the file's lines.
- """
- while True:
- line = self.readline()
- if not line:
- break
- yield line
-#class ExFileObject
-
-#------------------
-# Exported Classes
-#------------------
-class TarInfo(object):
- """Informational class which holds the details about an
- archive member given by a tar header block.
- TarInfo objects are returned by TarFile.getmember(),
- TarFile.getmembers() and TarFile.gettarinfo() and are
- usually created internally.
- """
-
- __slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
- "chksum", "type", "linkname", "uname", "gname",
- "devmajor", "devminor",
- "offset", "offset_data", "pax_headers", "sparse",
- "tarfile", "_sparse_structs", "_link_target")
-
- def __init__(self, name=""):
- """Construct a TarInfo object. name is the optional name
- of the member.
- """
- self.name = name # member name
- self.mode = 0o644 # file permissions
- self.uid = 0 # user id
- self.gid = 0 # group id
- self.size = 0 # file size
- self.mtime = 0 # modification time
- self.chksum = 0 # header checksum
- self.type = REGTYPE # member type
- self.linkname = "" # link name
- self.uname = "" # user name
- self.gname = "" # group name
- self.devmajor = 0 # device major number
- self.devminor = 0 # device minor number
-
- self.offset = 0 # the tar header starts here
- self.offset_data = 0 # the file's data starts here
-
- self.sparse = None # sparse member information
- self.pax_headers = {} # pax header information
-
- # In pax headers the "name" and "linkname" field are called
- # "path" and "linkpath".
- def _getpath(self):
- return self.name
- def _setpath(self, name):
- self.name = name
- path = property(_getpath, _setpath)
-
- def _getlinkpath(self):
- return self.linkname
- def _setlinkpath(self, linkname):
- self.linkname = linkname
- linkpath = property(_getlinkpath, _setlinkpath)
-
- def __repr__(self):
- return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
-
- def get_info(self):
- """Return the TarInfo's attributes as a dictionary.
- """
- info = {
- "name": self.name,
- "mode": self.mode & 0o7777,
- "uid": self.uid,
- "gid": self.gid,
- "size": self.size,
- "mtime": self.mtime,
- "chksum": self.chksum,
- "type": self.type,
- "linkname": self.linkname,
- "uname": self.uname,
- "gname": self.gname,
- "devmajor": self.devmajor,
- "devminor": self.devminor
- }
-
- if info["type"] == DIRTYPE and not info["name"].endswith("/"):
- info["name"] += "/"
-
- return info
-
- def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
- """Return a tar header as a string of 512 byte blocks.
- """
- info = self.get_info()
-
- if format == USTAR_FORMAT:
- return self.create_ustar_header(info, encoding, errors)
- elif format == GNU_FORMAT:
- return self.create_gnu_header(info, encoding, errors)
- elif format == PAX_FORMAT:
- return self.create_pax_header(info, encoding)
- else:
- raise ValueError("invalid format")
-
- def create_ustar_header(self, info, encoding, errors):
- """Return the object as a ustar header block.
- """
- info["magic"] = POSIX_MAGIC
-
- if len(info["linkname"]) > LENGTH_LINK:
- raise ValueError("linkname is too long")
-
- if len(info["name"]) > LENGTH_NAME:
- info["prefix"], info["name"] = self._posix_split_name(info["name"])
-
- return self._create_header(info, USTAR_FORMAT, encoding, errors)
-
- def create_gnu_header(self, info, encoding, errors):
- """Return the object as a GNU header block sequence.
- """
- info["magic"] = GNU_MAGIC
-
- buf = b""
- if len(info["linkname"]) > LENGTH_LINK:
- buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
-
- if len(info["name"]) > LENGTH_NAME:
- buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
-
- return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
-
- def create_pax_header(self, info, encoding):
- """Return the object as a ustar header block. If it cannot be
- represented this way, prepend a pax extended header sequence
-        with supplemental information.
- """
- info["magic"] = POSIX_MAGIC
- pax_headers = self.pax_headers.copy()
-
- # Test string fields for values that exceed the field length or cannot
- # be represented in ASCII encoding.
- for name, hname, length in (
- ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
- ("uname", "uname", 32), ("gname", "gname", 32)):
-
- if hname in pax_headers:
- # The pax header has priority.
- continue
-
- # Try to encode the string as ASCII.
- try:
- info[name].encode("ascii", "strict")
- except UnicodeEncodeError:
- pax_headers[hname] = info[name]
- continue
-
- if len(info[name]) > length:
- pax_headers[hname] = info[name]
-
- # Test number fields for values that exceed the field limit or values
-        # that need to be stored as float.
- for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
- if name in pax_headers:
- # The pax header has priority. Avoid overflow.
- info[name] = 0
- continue
-
- val = info[name]
- if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
- pax_headers[name] = str(val)
- info[name] = 0
-
- # Create a pax extended header if necessary.
- if pax_headers:
- buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
- else:
- buf = b""
-
- return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
-
- @classmethod
- def create_pax_global_header(cls, pax_headers):
- """Return the object as a pax global header block sequence.
- """
- return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
-
- def _posix_split_name(self, name):
- """Split a name longer than 100 chars into a prefix
- and a name part.
- """
- prefix = name[:LENGTH_PREFIX + 1]
- while prefix and prefix[-1] != "/":
- prefix = prefix[:-1]
-
- name = name[len(prefix):]
- prefix = prefix[:-1]
-
- if not prefix or len(name) > LENGTH_NAME:
- raise ValueError("name is too long")
- return prefix, name
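-
-    # In effect the joining slash is dropped: the split is made at the
-    # last "/" inside the first 156 characters, the part before it goes
-    # into the 155-byte ustar prefix field, the remainder into the name
-    # field, and ValueError propagates when no split leaves a name of at
-    # most 100 characters.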
-
- @staticmethod
- def _create_header(info, format, encoding, errors):
- """Return a header block. info is a dictionary with file
- information, format must be one of the *_FORMAT constants.
- """
- parts = [
- stn(info.get("name", ""), 100, encoding, errors),
- itn(info.get("mode", 0) & 0o7777, 8, format),
- itn(info.get("uid", 0), 8, format),
- itn(info.get("gid", 0), 8, format),
- itn(info.get("size", 0), 12, format),
- itn(info.get("mtime", 0), 12, format),
- b" ", # checksum field
- info.get("type", REGTYPE),
- stn(info.get("linkname", ""), 100, encoding, errors),
- info.get("magic", POSIX_MAGIC),
- stn(info.get("uname", ""), 32, encoding, errors),
- stn(info.get("gname", ""), 32, encoding, errors),
- itn(info.get("devmajor", 0), 8, format),
- itn(info.get("devminor", 0), 8, format),
- stn(info.get("prefix", ""), 155, encoding, errors)
- ]
-
- buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
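-        # The slice arithmetic below targets the 8-byte chksum field at
-        # offset 148: 512 - 364 == 148, "%06o\0" supplies seven bytes
-        # (offsets 148-154), and buf[-357:] resumes at offset 155, keeping
-        # the trailing space written as part of the placeholder field.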
- chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
- buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
- return buf
-
- @staticmethod
- def _create_payload(payload):
- """Return the string payload filled with zero bytes
- up to the next 512 byte border.
- """
- blocks, remainder = divmod(len(payload), BLOCKSIZE)
- if remainder > 0:
- payload += (BLOCKSIZE - remainder) * NUL
- return payload
-
- @classmethod
- def _create_gnu_long_header(cls, name, type, encoding, errors):
- """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
- for name.
- """
- name = name.encode(encoding, errors) + NUL
-
- info = {}
- info["name"] = "././@LongLink"
- info["type"] = type
- info["size"] = len(name)
- info["magic"] = GNU_MAGIC
-
- # create extended header + name blocks.
- return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
- cls._create_payload(name)
-
- @classmethod
- def _create_pax_generic_header(cls, pax_headers, type, encoding):
- """Return a POSIX.1-2008 extended or global header sequence
- that contains a list of keyword, value pairs. The values
- must be strings.
- """
- # Check if one of the fields contains surrogate characters and thereby
- # forces hdrcharset=BINARY, see _proc_pax() for more information.
- binary = False
- for keyword, value in pax_headers.items():
- try:
- value.encode("utf8", "strict")
- except UnicodeEncodeError:
- binary = True
- break
-
- records = b""
- if binary:
- # Put the hdrcharset field at the beginning of the header.
- records += b"21 hdrcharset=BINARY\n"
-
- for keyword, value in pax_headers.items():
- keyword = keyword.encode("utf8")
- if binary:
- # Try to restore the original byte representation of `value'.
-                # Needless to say, the encoding must match the string.
- value = value.encode(encoding, "surrogateescape")
- else:
- value = value.encode("utf8")
-
- l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
- n = p = 0
- while True:
- n = l + len(str(p))
- if n == p:
- break
- p = n
- records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
-
- # We use a hardcoded "././@PaxHeader" name like star does
- # instead of the one that POSIX recommends.
- info = {}
- info["name"] = "././@PaxHeader"
- info["type"] = type
- info["size"] = len(records)
- info["magic"] = POSIX_MAGIC
-
- # Create pax header + record blocks.
- return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
- cls._create_payload(records)
-
- @classmethod
- def frombuf(cls, buf, encoding, errors):
- """Construct a TarInfo object from a 512 byte bytes object.
- """
- if len(buf) == 0:
- raise EmptyHeaderError("empty header")
- if len(buf) != BLOCKSIZE:
- raise TruncatedHeaderError("truncated header")
- if buf.count(NUL) == BLOCKSIZE:
- raise EOFHeaderError("end of file header")
-
- chksum = nti(buf[148:156])
- if chksum not in calc_chksums(buf):
- raise InvalidHeaderError("bad checksum")
-
- obj = cls()
- obj.name = nts(buf[0:100], encoding, errors)
- obj.mode = nti(buf[100:108])
- obj.uid = nti(buf[108:116])
- obj.gid = nti(buf[116:124])
- obj.size = nti(buf[124:136])
- obj.mtime = nti(buf[136:148])
- obj.chksum = chksum
- obj.type = buf[156:157]
- obj.linkname = nts(buf[157:257], encoding, errors)
- obj.uname = nts(buf[265:297], encoding, errors)
- obj.gname = nts(buf[297:329], encoding, errors)
- obj.devmajor = nti(buf[329:337])
- obj.devminor = nti(buf[337:345])
- prefix = nts(buf[345:500], encoding, errors)
-
- # Old V7 tar format represents a directory as a regular
- # file with a trailing slash.
- if obj.type == AREGTYPE and obj.name.endswith("/"):
- obj.type = DIRTYPE
-
- # The old GNU sparse format occupies some of the unused
- # space in the buffer for up to 4 sparse structures.
-        # Save them for later processing in _proc_sparse().
- if obj.type == GNUTYPE_SPARSE:
- pos = 386
- structs = []
- for i in range(4):
- try:
- offset = nti(buf[pos:pos + 12])
- numbytes = nti(buf[pos + 12:pos + 24])
- except ValueError:
- break
- structs.append((offset, numbytes))
- pos += 24
- isextended = bool(buf[482])
- origsize = nti(buf[483:495])
- obj._sparse_structs = (structs, isextended, origsize)
-
- # Remove redundant slashes from directories.
- if obj.isdir():
- obj.name = obj.name.rstrip("/")
-
- # Reconstruct a ustar longname.
- if prefix and obj.type not in GNU_TYPES:
- obj.name = prefix + "/" + obj.name
- return obj
-
- @classmethod
- def fromtarfile(cls, tarfile):
- """Return the next TarInfo object from TarFile object
- tarfile.
- """
- buf = tarfile.fileobj.read(BLOCKSIZE)
- obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
- obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
- return obj._proc_member(tarfile)
-
- #--------------------------------------------------------------------------
- # The following are methods that are called depending on the type of a
- # member. The entry point is _proc_member() which can be overridden in a
- # subclass to add custom _proc_*() methods. A _proc_*() method MUST
- # implement the following
- # operations:
- # 1. Set self.offset_data to the position where the data blocks begin,
- # if there is data that follows.
- # 2. Set tarfile.offset to the position where the next member's header will
- # begin.
- # 3. Return self or another valid TarInfo object.
- def _proc_member(self, tarfile):
- """Choose the right processing method depending on
- the type and call it.
- """
- if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
- return self._proc_gnulong(tarfile)
- elif self.type == GNUTYPE_SPARSE:
- return self._proc_sparse(tarfile)
- elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
- return self._proc_pax(tarfile)
- else:
- return self._proc_builtin(tarfile)
-
- def _proc_builtin(self, tarfile):
- """Process a builtin type or an unknown type which
- will be treated as a regular file.
- """
- self.offset_data = tarfile.fileobj.tell()
- offset = self.offset_data
- if self.isreg() or self.type not in SUPPORTED_TYPES:
- # Skip the following data blocks.
- offset += self._block(self.size)
- tarfile.offset = offset
-
- # Patch the TarInfo object with saved global
- # header information.
- self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
-
- return self
-
- def _proc_gnulong(self, tarfile):
- """Process the blocks that hold a GNU longname
- or longlink member.
- """
- buf = tarfile.fileobj.read(self._block(self.size))
-
- # Fetch the next header and process it.
- try:
- next = self.fromtarfile(tarfile)
- except HeaderError:
- raise SubsequentHeaderError("missing or bad subsequent header")
-
- # Patch the TarInfo object from the next header with
- # the longname information.
- next.offset = self.offset
- if self.type == GNUTYPE_LONGNAME:
- next.name = nts(buf, tarfile.encoding, tarfile.errors)
- elif self.type == GNUTYPE_LONGLINK:
- next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
-
- return next
-
- def _proc_sparse(self, tarfile):
- """Process a GNU sparse header plus extra headers.
- """
- # We already collected some sparse structures in frombuf().
- structs, isextended, origsize = self._sparse_structs
- del self._sparse_structs
-
- # Collect sparse structures from extended header blocks.
- while isextended:
- buf = tarfile.fileobj.read(BLOCKSIZE)
- pos = 0
- for i in range(21):
- try:
- offset = nti(buf[pos:pos + 12])
- numbytes = nti(buf[pos + 12:pos + 24])
- except ValueError:
- break
- if offset and numbytes:
- structs.append((offset, numbytes))
- pos += 24
- isextended = bool(buf[504])
- self.sparse = structs
-
- self.offset_data = tarfile.fileobj.tell()
- tarfile.offset = self.offset_data + self._block(self.size)
- self.size = origsize
- return self
-
- def _proc_pax(self, tarfile):
- """Process an extended or global header as described in
- POSIX.1-2008.
- """
- # Read the header information.
- buf = tarfile.fileobj.read(self._block(self.size))
-
- # A pax header stores supplemental information for either
- # the following file (extended) or all following files
- # (global).
- if self.type == XGLTYPE:
- pax_headers = tarfile.pax_headers
- else:
- pax_headers = tarfile.pax_headers.copy()
-
- # Check if the pax header contains a hdrcharset field. This tells us
- # the encoding of the path, linkpath, uname and gname fields. Normally,
-        # these fields are UTF-8 encoded, but since POSIX.1-2008 tar
- # implementations are allowed to store them as raw binary strings if
- # the translation to UTF-8 fails.
- match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
- if match is not None:
- pax_headers["hdrcharset"] = match.group(1).decode("utf8")
-
- # For the time being, we don't care about anything other than "BINARY".
- # The only other value that is currently allowed by the standard is
-        # "ISO-IR 10646 2000 UTF-8", in other words UTF-8.
- hdrcharset = pax_headers.get("hdrcharset")
- if hdrcharset == "BINARY":
- encoding = tarfile.encoding
- else:
- encoding = "utf8"
-
-        # Parse pax header information. A record looks like this:
- # "%d %s=%s\n" % (length, keyword, value). length is the size
- # of the complete record including the length field itself and
- # the newline. keyword and value are both UTF-8 encoded strings.
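-        # For example, b"25 ctime=1084839148.1212\n" is a valid record:
-        # the leading 25 counts every byte of the record, including the
-        # digits of 25 itself and the trailing newline.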
- regex = re.compile(br"(\d+) ([^=]+)=")
- pos = 0
- while True:
- match = regex.match(buf, pos)
- if not match:
- break
-
- length, keyword = match.groups()
- length = int(length)
- value = buf[match.end(2) + 1:match.start(1) + length - 1]
-
- # Normally, we could just use "utf8" as the encoding and "strict"
- # as the error handler, but we better not take the risk. For
- # example, GNU tar <= 1.23 is known to store filenames it cannot
- # translate to UTF-8 as raw strings (unfortunately without a
- # hdrcharset=BINARY header).
- # We first try the strict standard encoding, and if that fails we
- # fall back on the user's encoding and error handler.
- keyword = self._decode_pax_field(keyword, "utf8", "utf8",
- tarfile.errors)
- if keyword in PAX_NAME_FIELDS:
- value = self._decode_pax_field(value, encoding, tarfile.encoding,
- tarfile.errors)
- else:
- value = self._decode_pax_field(value, "utf8", "utf8",
- tarfile.errors)
-
- pax_headers[keyword] = value
- pos += length
-
- # Fetch the next header.
- try:
- next = self.fromtarfile(tarfile)
- except HeaderError:
- raise SubsequentHeaderError("missing or bad subsequent header")
-
- # Process GNU sparse information.
- if "GNU.sparse.map" in pax_headers:
- # GNU extended sparse format version 0.1.
- self._proc_gnusparse_01(next, pax_headers)
-
- elif "GNU.sparse.size" in pax_headers:
- # GNU extended sparse format version 0.0.
- self._proc_gnusparse_00(next, pax_headers, buf)
-
- elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
- # GNU extended sparse format version 1.0.
- self._proc_gnusparse_10(next, pax_headers, tarfile)
-
- if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
- # Patch the TarInfo object with the extended header info.
- next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
- next.offset = self.offset
-
- if "size" in pax_headers:
- # If the extended header replaces the size field,
- # we need to recalculate the offset where the next
- # header starts.
- offset = next.offset_data
- if next.isreg() or next.type not in SUPPORTED_TYPES:
- offset += next._block(next.size)
- tarfile.offset = offset
-
- return next
-
- def _proc_gnusparse_00(self, next, pax_headers, buf):
- """Process a GNU tar extended sparse header, version 0.0.
- """
- offsets = []
- for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
- offsets.append(int(match.group(1)))
- numbytes = []
- for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
- numbytes.append(int(match.group(1)))
- next.sparse = list(zip(offsets, numbytes))
-
- def _proc_gnusparse_01(self, next, pax_headers):
- """Process a GNU tar extended sparse header, version 0.1.
- """
- sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
- next.sparse = list(zip(sparse[::2], sparse[1::2]))
-
- def _proc_gnusparse_10(self, next, pax_headers, tarfile):
- """Process a GNU tar extended sparse header, version 1.0.
- """
- fields = None
- sparse = []
- buf = tarfile.fileobj.read(BLOCKSIZE)
- fields, buf = buf.split(b"\n", 1)
- fields = int(fields)
- while len(sparse) < fields * 2:
- if b"\n" not in buf:
- buf += tarfile.fileobj.read(BLOCKSIZE)
- number, buf = buf.split(b"\n", 1)
- sparse.append(int(number))
- next.offset_data = tarfile.fileobj.tell()
- next.sparse = list(zip(sparse[::2], sparse[1::2]))
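-
-    # The 1.0 map decoded above is plain decimal text in the member's data
-    # area: an entry count, then alternating offset/size values, one number
-    # per line -- for instance b"2\n0\n10\n20\n10\n" (padded to a full
-    # 512-byte block) describes two data runs.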
-
- def _apply_pax_info(self, pax_headers, encoding, errors):
- """Replace fields with supplemental information from a previous
- pax extended or global header.
- """
- for keyword, value in pax_headers.items():
- if keyword == "GNU.sparse.name":
- setattr(self, "path", value)
- elif keyword == "GNU.sparse.size":
- setattr(self, "size", int(value))
- elif keyword == "GNU.sparse.realsize":
- setattr(self, "size", int(value))
- elif keyword in PAX_FIELDS:
- if keyword in PAX_NUMBER_FIELDS:
- try:
- value = PAX_NUMBER_FIELDS[keyword](value)
- except ValueError:
- value = 0
- if keyword == "path":
- value = value.rstrip("/")
- setattr(self, keyword, value)
-
- self.pax_headers = pax_headers.copy()
-
- def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
- """Decode a single field from a pax record.
- """
- try:
- return value.decode(encoding, "strict")
- except UnicodeDecodeError:
- return value.decode(fallback_encoding, fallback_errors)
-
- def _block(self, count):
- """Round up a byte count by BLOCKSIZE and return it,
- e.g. _block(834) => 1024.
- """
- blocks, remainder = divmod(count, BLOCKSIZE)
- if remainder:
- blocks += 1
- return blocks * BLOCKSIZE
-
- def isreg(self):
- return self.type in REGULAR_TYPES
- def isfile(self):
- return self.isreg()
- def isdir(self):
- return self.type == DIRTYPE
- def issym(self):
- return self.type == SYMTYPE
- def islnk(self):
- return self.type == LNKTYPE
- def ischr(self):
- return self.type == CHRTYPE
- def isblk(self):
- return self.type == BLKTYPE
- def isfifo(self):
- return self.type == FIFOTYPE
- def issparse(self):
- return self.sparse is not None
- def isdev(self):
- return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
-# class TarInfo
-
-class TarFile(object):
- """The TarFile Class provides an interface to tar archives.
- """
-
- debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
-
- dereference = False # If true, add content of linked file to the
- # tar file, else the link.
-
- ignore_zeros = False # If true, skips empty or invalid blocks and
- # continues processing.
-
- errorlevel = 1 # If 0, fatal errors only appear in debug
- # messages (if debug >= 0). If > 0, errors
- # are passed to the caller as exceptions.
-
- format = DEFAULT_FORMAT # The format to use when creating an archive.
-
- encoding = ENCODING # Encoding for 8-bit character strings.
-
- errors = None # Error handler for unicode conversion.
-
- tarinfo = TarInfo # The default TarInfo class to use.
-
- fileobject = ExFileObject # The default ExFileObject class to use.
-
- def __init__(self, name=None, mode="r", fileobj=None, format=None,
- tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
- errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
- """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
- read from an existing archive, 'a' to append data to an existing
- file or 'w' to create a new file overwriting an existing one. `mode'
- defaults to 'r'.
- If `fileobj' is given, it is used for reading or writing data. If it
- can be determined, `mode' is overridden by `fileobj's mode.
- `fileobj' is not closed, when TarFile is closed.
- """
- if len(mode) > 1 or mode not in "raw":
- raise ValueError("mode must be 'r', 'a' or 'w'")
- self.mode = mode
- self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
-
- if not fileobj:
- if self.mode == "a" and not os.path.exists(name):
- # Create nonexistent files in append mode.
- self.mode = "w"
- self._mode = "wb"
- fileobj = bltn_open(name, self._mode)
- self._extfileobj = False
- else:
- if name is None and hasattr(fileobj, "name"):
- name = fileobj.name
- if hasattr(fileobj, "mode"):
- self._mode = fileobj.mode
- self._extfileobj = True
- self.name = os.path.abspath(name) if name else None
- self.fileobj = fileobj
-
- # Init attributes.
- if format is not None:
- self.format = format
- if tarinfo is not None:
- self.tarinfo = tarinfo
- if dereference is not None:
- self.dereference = dereference
- if ignore_zeros is not None:
- self.ignore_zeros = ignore_zeros
- if encoding is not None:
- self.encoding = encoding
- self.errors = errors
-
- if pax_headers is not None and self.format == PAX_FORMAT:
- self.pax_headers = pax_headers
- else:
- self.pax_headers = {}
-
- if debug is not None:
- self.debug = debug
- if errorlevel is not None:
- self.errorlevel = errorlevel
-
- # Init datastructures.
- self.closed = False
- self.members = [] # list of members as TarInfo objects
- self._loaded = False # flag if all members have been read
- self.offset = self.fileobj.tell()
- # current position in the archive file
- self.inodes = {} # dictionary caching the inodes of
- # archive members already added
-
- try:
- if self.mode == "r":
- self.firstmember = None
- self.firstmember = self.next()
-
- if self.mode == "a":
- # Move to the end of the archive,
- # before the first empty block.
- while True:
- self.fileobj.seek(self.offset)
- try:
- tarinfo = self.tarinfo.fromtarfile(self)
- self.members.append(tarinfo)
- except EOFHeaderError:
- self.fileobj.seek(self.offset)
- break
- except HeaderError as e:
- raise ReadError(str(e))
-
- if self.mode in "aw":
- self._loaded = True
-
- if self.pax_headers:
- buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
- self.fileobj.write(buf)
- self.offset += len(buf)
- except:
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
- raise
-
- #--------------------------------------------------------------------------
- # Below are the classmethods which act as alternate constructors to the
- # TarFile class. The open() method is the only one that is needed for
- # public use; it is the "super"-constructor and is able to select an
- # adequate "sub"-constructor for a particular compression using the mapping
- # from OPEN_METH.
- #
- # This concept allows one to subclass TarFile without losing the comfort of
- # the super-constructor. A sub-constructor is registered and made available
- # by adding it to the mapping in OPEN_METH.
-
- @classmethod
- def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
- """Open a tar archive for reading, writing or appending. Return
- an appropriate TarFile class.
-
- mode:
- 'r' or 'r:*' open for reading with transparent compression
- 'r:' open for reading exclusively uncompressed
- 'r:gz' open for reading with gzip compression
- 'r:bz2' open for reading with bzip2 compression
- 'a' or 'a:' open for appending, creating the file if necessary
- 'w' or 'w:' open for writing without compression
- 'w:gz' open for writing with gzip compression
- 'w:bz2' open for writing with bzip2 compression
-
- 'r|*' open a stream of tar blocks with transparent compression
- 'r|' open an uncompressed stream of tar blocks for reading
- 'r|gz' open a gzip compressed stream of tar blocks
- 'r|bz2' open a bzip2 compressed stream of tar blocks
- 'w|' open an uncompressed stream for writing
- 'w|gz' open a gzip compressed stream for writing
- 'w|bz2' open a bzip2 compressed stream for writing
- """
-
- if not name and not fileobj:
- raise ValueError("nothing to open")
-
- if mode in ("r", "r:*"):
- # Find out which *open() is appropriate for opening the file.
- for comptype in cls.OPEN_METH:
- func = getattr(cls, cls.OPEN_METH[comptype])
- if fileobj is not None:
- saved_pos = fileobj.tell()
- try:
- return func(name, "r", fileobj, **kwargs)
- except (ReadError, CompressionError) as e:
- if fileobj is not None:
- fileobj.seek(saved_pos)
- continue
- raise ReadError("file could not be opened successfully")
-
- elif ":" in mode:
- filemode, comptype = mode.split(":", 1)
- filemode = filemode or "r"
- comptype = comptype or "tar"
-
- # Select the *open() function according to
- # given compression.
- if comptype in cls.OPEN_METH:
- func = getattr(cls, cls.OPEN_METH[comptype])
- else:
- raise CompressionError("unknown compression type %r" % comptype)
- return func(name, filemode, fileobj, **kwargs)
-
- elif "|" in mode:
- filemode, comptype = mode.split("|", 1)
- filemode = filemode or "r"
- comptype = comptype or "tar"
-
- if filemode not in "rw":
- raise ValueError("mode must be 'r' or 'w'")
-
- stream = _Stream(name, filemode, comptype, fileobj, bufsize)
- try:
- t = cls(name, filemode, stream, **kwargs)
- except:
- stream.close()
- raise
- t._extfileobj = False
- return t
-
- elif mode in "aw":
- return cls.taropen(name, mode, fileobj, **kwargs)
-
- raise ValueError("undiscernible mode")
-
- @classmethod
- def taropen(cls, name, mode="r", fileobj=None, **kwargs):
- """Open uncompressed tar archive name for reading or writing.
- """
- if len(mode) > 1 or mode not in "raw":
- raise ValueError("mode must be 'r', 'a' or 'w'")
- return cls(name, mode, fileobj, **kwargs)
-
- @classmethod
- def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
- """Open gzip compressed tar archive name for reading or writing.
- Appending is not allowed.
- """
- if len(mode) > 1 or mode not in "rw":
- raise ValueError("mode must be 'r' or 'w'")
-
- try:
- import gzip
- gzip.GzipFile
- except (ImportError, AttributeError):
- raise CompressionError("gzip module is not available")
-
- extfileobj = fileobj is not None
- try:
- fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
- t = cls.taropen(name, mode, fileobj, **kwargs)
- except IOError:
- if not extfileobj and fileobj is not None:
- fileobj.close()
- if fileobj is None:
- raise
- raise ReadError("not a gzip file")
- except:
- if not extfileobj and fileobj is not None:
- fileobj.close()
- raise
- t._extfileobj = extfileobj
- return t
-
- @classmethod
- def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
- """Open bzip2 compressed tar archive name for reading or writing.
- Appending is not allowed.
- """
- if len(mode) > 1 or mode not in "rw":
- raise ValueError("mode must be 'r' or 'w'.")
-
- try:
- import bz2
- except ImportError:
- raise CompressionError("bz2 module is not available")
-
- if fileobj is not None:
- fileobj = _BZ2Proxy(fileobj, mode)
- else:
- fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
-
- try:
- t = cls.taropen(name, mode, fileobj, **kwargs)
- except (IOError, EOFError):
- fileobj.close()
- raise ReadError("not a bzip2 file")
- t._extfileobj = False
- return t
-
- # All *open() methods are registered here.
- OPEN_METH = {
- "tar": "taropen", # uncompressed tar
- "gz": "gzopen", # gzip compressed tar
- "bz2": "bz2open" # bzip2 compressed tar
- }
-
- #--------------------------------------------------------------------------
- # The public methods which TarFile provides:
-
- def close(self):
- """Close the TarFile. In write-mode, two finishing zero blocks are
- appended to the archive.
- """
- if self.closed:
- return
-
- if self.mode in "aw":
- self.fileobj.write(NUL * (BLOCKSIZE * 2))
- self.offset += (BLOCKSIZE * 2)
- # fill up the end with zero-blocks
- # (like option -b20 for tar does)
- blocks, remainder = divmod(self.offset, RECORDSIZE)
- if remainder > 0:
- self.fileobj.write(NUL * (RECORDSIZE - remainder))
-
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
-
- def getmember(self, name):
- """Return a TarInfo object for member `name'. If `name' can not be
- found in the archive, KeyError is raised. If a member occurs more
- than once in the archive, its last occurrence is assumed to be the
- most up-to-date version.
- """
- tarinfo = self._getmember(name)
- if tarinfo is None:
- raise KeyError("filename %r not found" % name)
- return tarinfo
-
- def getmembers(self):
- """Return the members of the archive as a list of TarInfo objects. The
- list has the same order as the members in the archive.
- """
- self._check()
- if not self._loaded: # if we want to obtain a list of
- self._load() # all members, we first have to
- # scan the whole archive.
- return self.members
-
- def getnames(self):
- """Return the members of the archive as a list of their names. It has
- the same order as the list returned by getmembers().
- """
- return [tarinfo.name for tarinfo in self.getmembers()]
-
- def gettarinfo(self, name=None, arcname=None, fileobj=None):
- """Create a TarInfo object for either the file `name' or the file
- object `fileobj' (using os.fstat on its file descriptor). You can
- modify some of the TarInfo's attributes before you add it using
- addfile(). If given, `arcname' specifies an alternative name for the
- file in the archive.
- """
- self._check("aw")
-
- # When fileobj is given, replace name by
- # fileobj's real name.
- if fileobj is not None:
- name = fileobj.name
-
- # Building the name of the member in the archive.
- # Backward slashes are converted to forward slashes,
-        # absolute paths are turned into relative paths.
- if arcname is None:
- arcname = name
- drv, arcname = os.path.splitdrive(arcname)
- arcname = arcname.replace(os.sep, "/")
- arcname = arcname.lstrip("/")
-
- # Now, fill the TarInfo object with
- # information specific for the file.
- tarinfo = self.tarinfo()
- tarinfo.tarfile = self
-
- # Use os.stat or os.lstat, depending on platform
- # and if symlinks shall be resolved.
- if fileobj is None:
- if hasattr(os, "lstat") and not self.dereference:
- statres = os.lstat(name)
- else:
- statres = os.stat(name)
- else:
- statres = os.fstat(fileobj.fileno())
- linkname = ""
-
- stmd = statres.st_mode
- if stat.S_ISREG(stmd):
- inode = (statres.st_ino, statres.st_dev)
- if not self.dereference and statres.st_nlink > 1 and \
- inode in self.inodes and arcname != self.inodes[inode]:
- # Is it a hardlink to an already
- # archived file?
- type = LNKTYPE
- linkname = self.inodes[inode]
- else:
- # The inode is added only if it's valid.
- # For win32 it is always 0.
- type = REGTYPE
- if inode[0]:
- self.inodes[inode] = arcname
- elif stat.S_ISDIR(stmd):
- type = DIRTYPE
- elif stat.S_ISFIFO(stmd):
- type = FIFOTYPE
- elif stat.S_ISLNK(stmd):
- type = SYMTYPE
- linkname = os.readlink(name)
- elif stat.S_ISCHR(stmd):
- type = CHRTYPE
- elif stat.S_ISBLK(stmd):
- type = BLKTYPE
- else:
- return None
-
- # Fill the TarInfo object with all
- # information we can get.
- tarinfo.name = arcname
- tarinfo.mode = stmd
- tarinfo.uid = statres.st_uid
- tarinfo.gid = statres.st_gid
- if type == REGTYPE:
- tarinfo.size = statres.st_size
- else:
- tarinfo.size = 0
- tarinfo.mtime = statres.st_mtime
- tarinfo.type = type
- tarinfo.linkname = linkname
- if pwd:
- try:
- tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
- except KeyError:
- pass
- if grp:
- try:
- tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
- except KeyError:
- pass
-
- if type in (CHRTYPE, BLKTYPE):
- if hasattr(os, "major") and hasattr(os, "minor"):
- tarinfo.devmajor = os.major(statres.st_rdev)
- tarinfo.devminor = os.minor(statres.st_rdev)
- return tarinfo
-
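gettarinfo() is the hook for rewriting metadata before a member is written. A sketch of the usual pattern with the stdlib module; "notes.txt" is a hypothetical file:

```python
import tarfile

with tarfile.open("out.tar", "w") as tf:
    ti = tf.gettarinfo("notes.txt", arcname="docs/notes.txt")
    ti.uname = ti.gname = "root"   # normalize ownership metadata
    ti.uid = ti.gid = 0
    with open("notes.txt", "rb") as f:  # 'rb', as addfile()'s docstring warns
        tf.addfile(ti, f)
```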
- def list(self, verbose=True):
- """Print a table of contents to sys.stdout. If `verbose' is False, only
- the names of the members are printed. If it is True, an `ls -l'-like
- output is produced.
- """
- self._check()
-
- for tarinfo in self:
- if verbose:
- print(filemode(tarinfo.mode), end=' ')
- print("%s/%s" % (tarinfo.uname or tarinfo.uid,
- tarinfo.gname or tarinfo.gid), end=' ')
- if tarinfo.ischr() or tarinfo.isblk():
- print("%10s" % ("%d,%d" \
- % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
- else:
- print("%10d" % tarinfo.size, end=' ')
- print("%d-%02d-%02d %02d:%02d:%02d" \
- % time.localtime(tarinfo.mtime)[:6], end=' ')
-
- print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
-
- if verbose:
- if tarinfo.issym():
- print("->", tarinfo.linkname, end=' ')
- if tarinfo.islnk():
- print("link to", tarinfo.linkname, end=' ')
- print()
-
- def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
- """Add the file `name' to the archive. `name' may be any type of file
- (directory, fifo, symbolic link, etc.). If given, `arcname'
- specifies an alternative name for the file in the archive.
- Directories are added recursively by default. This can be avoided by
- setting `recursive' to False. `exclude' is a function that should
- return True for each filename to be excluded. `filter' is a function
- that expects a TarInfo object argument and returns the changed
- TarInfo object, if it returns None the TarInfo object will be
- excluded from the archive.
- """
- self._check("aw")
-
- if arcname is None:
- arcname = name
-
- # Exclude pathnames.
- if exclude is not None:
- import warnings
- warnings.warn("use the filter argument instead",
- DeprecationWarning, 2)
- if exclude(name):
- self._dbg(2, "tarfile: Excluded %r" % name)
- return
-
- # Skip if somebody tries to archive the archive...
- if self.name is not None and os.path.abspath(name) == self.name:
- self._dbg(2, "tarfile: Skipped %r" % name)
- return
-
- self._dbg(1, name)
-
- # Create a TarInfo object from the file.
- tarinfo = self.gettarinfo(name, arcname)
-
- if tarinfo is None:
- self._dbg(1, "tarfile: Unsupported type %r" % name)
- return
-
- # Change or exclude the TarInfo object.
- if filter is not None:
- tarinfo = filter(tarinfo)
- if tarinfo is None:
- self._dbg(2, "tarfile: Excluded %r" % name)
- return
-
- # Append the tar header and data to the archive.
- if tarinfo.isreg():
- f = bltn_open(name, "rb")
- self.addfile(tarinfo, f)
- f.close()
-
- elif tarinfo.isdir():
- self.addfile(tarinfo)
- if recursive:
- for f in os.listdir(name):
- self.add(os.path.join(name, f), os.path.join(arcname, f),
- recursive, exclude, filter=filter)
-
- else:
- self.addfile(tarinfo)
-
- def addfile(self, tarinfo, fileobj=None):
- """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
- given, tarinfo.size bytes are read from it and added to the archive.
- You can create TarInfo objects using gettarinfo().
- On Windows platforms, `fileobj' should always be opened with mode
- 'rb' to avoid size mismatches caused by newline translation.
- """
- self._check("aw")
-
- tarinfo = copy.copy(tarinfo)
-
- buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
- self.fileobj.write(buf)
- self.offset += len(buf)
-
- # If there's data to follow, append it.
- if fileobj is not None:
- copyfileobj(fileobj, self.fileobj, tarinfo.size)
- blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
- if remainder > 0:
- self.fileobj.write(NUL * (BLOCKSIZE - remainder))
- blocks += 1
- self.offset += blocks * BLOCKSIZE
-
- self.members.append(tarinfo)
-
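Because addfile() trusts tarinfo.size, a hand-built TarInfo plus an in-memory stream is enough to add synthetic members. A sketch using the stdlib module:

```python
import io
import tarfile
import time

payload = b"hello, tar\n"
ti = tarfile.TarInfo(name="greeting.txt")
ti.size = len(payload)         # must match the stream's length
ti.mtime = int(time.time())

with tarfile.open("inmem.tar", "w") as tf:
    tf.addfile(ti, io.BytesIO(payload))
```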
- def extractall(self, path=".", members=None):
- """Extract all members from the archive to the current working
- directory and set owner, modification time and permissions on
- directories afterwards. `path' specifies a different directory
- to extract to. `members' is optional and must be a subset of the
- list returned by getmembers().
- """
- directories = []
-
- if members is None:
- members = self
-
- for tarinfo in members:
- if tarinfo.isdir():
- # Extract directories with a safe mode.
- directories.append(tarinfo)
- tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 0o700
- # Do not set attributes on directories yet; that is done further down.
- self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
-
- # Reverse sort directories.
- directories.sort(key=lambda a: a.name)
- directories.reverse()
-
- # Set correct owner, mtime and filemode on directories.
- for tarinfo in directories:
- dirpath = os.path.join(path, tarinfo.name)
- try:
- self.chown(tarinfo, dirpath)
- self.utime(tarinfo, dirpath)
- self.chmod(tarinfo, dirpath)
- except ExtractError as e:
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
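Note that extractall() writes wherever member names point, so callers usually pre-filter `members` when the archive is untrusted. A minimal filtering sketch; the check itself is this example's assumption, not part of the backport (newer stdlib releases offer a `filter=` argument for the same purpose):

```python
import os
import tarfile

def inside(directory, target):
    # True when 'target' resolves to a path under 'directory'.
    directory = os.path.realpath(directory)
    target = os.path.realpath(target)
    return os.path.commonpath([directory, target]) == directory

def safe_members(tf, dest):
    for member in tf.getmembers():
        if inside(dest, os.path.join(dest, member.name)):
            yield member   # skip anything like "../evil"

with tarfile.open("example.tar") as tf:
    tf.extractall(path="unpacked", members=safe_members(tf, "unpacked"))
```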
- def extract(self, member, path="", set_attrs=True):
- """Extract a member from the archive to the current working directory,
- using its full name. Its file information is extracted as accurately
- as possible. `member' may be a filename or a TarInfo object. You can
- specify a different directory using `path'. File attributes (owner,
- mtime, mode) are set unless `set_attrs' is False.
- """
- self._check("r")
-
- if isinstance(member, str):
- tarinfo = self.getmember(member)
- else:
- tarinfo = member
-
- # Prepare the link target for makelink().
- if tarinfo.islnk():
- tarinfo._link_target = os.path.join(path, tarinfo.linkname)
-
- try:
- self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
- set_attrs=set_attrs)
- except EnvironmentError as e:
- if self.errorlevel > 0:
- raise
- else:
- if e.filename is None:
- self._dbg(1, "tarfile: %s" % e.strerror)
- else:
- self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
- except ExtractError as e:
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
- def extractfile(self, member):
- """Extract a member from the archive as a file object. `member' may be
- a filename or a TarInfo object. If `member' is a regular file, a
- file-like object is returned. If `member' is a link, a file-like
- object is constructed from the link's target. If `member' is none of
- the above, None is returned.
- The file-like object is read-only and provides the following
- methods: read(), readline(), readlines(), seek() and tell()
- """
- self._check("r")
-
- if isinstance(member, str):
- tarinfo = self.getmember(member)
- else:
- tarinfo = member
-
- if tarinfo.isreg():
- return self.fileobject(self, tarinfo)
-
- elif tarinfo.type not in SUPPORTED_TYPES:
- # If a member's type is unknown, it is treated as a
- # regular file.
- return self.fileobject(self, tarinfo)
-
- elif tarinfo.islnk() or tarinfo.issym():
- if isinstance(self.fileobj, _Stream):
- # A small but ugly workaround for the case that someone tries
- # to extract a (sym)link as a file-object from a non-seekable
- # stream of tar blocks.
- raise StreamError("cannot extract (sym)link as file object")
- else:
- # A (sym)link's file object is its target's file object.
- return self.extractfile(self._find_link_target(tarinfo))
- else:
- # If there's no data associated with the member (directory, chrdev,
- # blkdev, etc.), return None instead of a file object.
- return None
-
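A short sketch of extractfile()'s contract: regular files (and unknown types) yield a read-only file object, links are resolved to their targets, and data-less members come back as None. Stdlib module shown; the member name is hypothetical:

```python
import tarfile

with tarfile.open("example.tar") as tf:
    fobj = tf.extractfile("docs/notes.txt")
    if fobj is None:
        print("member has no data (directory, device, ...)")
    else:
        print(fobj.read(64))  # read-only stream: read/readline/seek/tell
```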
- def _extract_member(self, tarinfo, targetpath, set_attrs=True):
- """Extract the TarInfo object tarinfo to a physical
- file called targetpath.
- """
- # Fetch the TarInfo object for the given name
- # and build the destination pathname, replacing
- # forward slashes with platform-specific separators.
- targetpath = targetpath.rstrip("/")
- targetpath = targetpath.replace("/", os.sep)
-
- # Create all upper directories.
- upperdirs = os.path.dirname(targetpath)
- if upperdirs and not os.path.exists(upperdirs):
- # Create directories that are not part of the archive with
- # default permissions.
- os.makedirs(upperdirs)
-
- if tarinfo.islnk() or tarinfo.issym():
- self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
- else:
- self._dbg(1, tarinfo.name)
-
- if tarinfo.isreg():
- self.makefile(tarinfo, targetpath)
- elif tarinfo.isdir():
- self.makedir(tarinfo, targetpath)
- elif tarinfo.isfifo():
- self.makefifo(tarinfo, targetpath)
- elif tarinfo.ischr() or tarinfo.isblk():
- self.makedev(tarinfo, targetpath)
- elif tarinfo.islnk() or tarinfo.issym():
- self.makelink(tarinfo, targetpath)
- elif tarinfo.type not in SUPPORTED_TYPES:
- self.makeunknown(tarinfo, targetpath)
- else:
- self.makefile(tarinfo, targetpath)
-
- if set_attrs:
- self.chown(tarinfo, targetpath)
- if not tarinfo.issym():
- self.chmod(tarinfo, targetpath)
- self.utime(tarinfo, targetpath)
-
- #--------------------------------------------------------------------------
- # Below are the different file methods. They are called via
- # _extract_member() when extract() is called. They can be replaced in a
- # subclass to implement other functionality.
-
- def makedir(self, tarinfo, targetpath):
- """Make a directory called targetpath.
- """
- try:
- # Use a safe mode for the directory, the real mode is set
- # later in _extract_member().
- os.mkdir(targetpath, 0o700)
- except EnvironmentError as e:
- if e.errno != errno.EEXIST:
- raise
-
- def makefile(self, tarinfo, targetpath):
- """Make a file called targetpath.
- """
- source = self.fileobj
- source.seek(tarinfo.offset_data)
- target = bltn_open(targetpath, "wb")
- if tarinfo.sparse is not None:
- for offset, size in tarinfo.sparse:
- target.seek(offset)
- copyfileobj(source, target, size)
- else:
- copyfileobj(source, target, tarinfo.size)
- target.seek(tarinfo.size)
- target.truncate()
- target.close()
-
- def makeunknown(self, tarinfo, targetpath):
- """Make a file from a TarInfo object with an unknown type
- at targetpath.
- """
- self.makefile(tarinfo, targetpath)
- self._dbg(1, "tarfile: Unknown file type %r, " \
- "extracted as regular file." % tarinfo.type)
-
- def makefifo(self, tarinfo, targetpath):
- """Make a fifo called targetpath.
- """
- if hasattr(os, "mkfifo"):
- os.mkfifo(targetpath)
- else:
- raise ExtractError("fifo not supported by system")
-
- def makedev(self, tarinfo, targetpath):
- """Make a character or block device called targetpath.
- """
- if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
- raise ExtractError("special devices not supported by system")
-
- mode = tarinfo.mode
- if tarinfo.isblk():
- mode |= stat.S_IFBLK
- else:
- mode |= stat.S_IFCHR
-
- os.mknod(targetpath, mode,
- os.makedev(tarinfo.devmajor, tarinfo.devminor))
-
- def makelink(self, tarinfo, targetpath):
- """Make a (symbolic) link called targetpath. If it cannot be created
- (platform limitation), we try to make a copy of the referenced file
- instead of a link.
- """
- try:
- # For systems that support symbolic and hard links.
- if tarinfo.issym():
- os.symlink(tarinfo.linkname, targetpath)
- else:
- # See extract().
- if os.path.exists(tarinfo._link_target):
- os.link(tarinfo._link_target, targetpath)
- else:
- self._extract_member(self._find_link_target(tarinfo),
- targetpath)
- except symlink_exception:
- # Link creation failed (platform limitation); fall back to
- # extracting a copy of the link's target, as documented above.
- try:
- self._extract_member(self._find_link_target(tarinfo),
- targetpath)
- except KeyError:
- raise ExtractError("unable to resolve link inside archive")
-
- def chown(self, tarinfo, targetpath):
- """Set owner of targetpath according to tarinfo.
- """
- if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
- # We have to be root to do so.
- try:
- g = grp.getgrnam(tarinfo.gname)[2]
- except KeyError:
- g = tarinfo.gid
- try:
- u = pwd.getpwnam(tarinfo.uname)[2]
- except KeyError:
- u = tarinfo.uid
- try:
- if tarinfo.issym() and hasattr(os, "lchown"):
- os.lchown(targetpath, u, g)
- else:
- if sys.platform != "os2emx":
- os.chown(targetpath, u, g)
- except EnvironmentError as e:
- raise ExtractError("could not change owner")
-
- def chmod(self, tarinfo, targetpath):
- """Set file permissions of targetpath according to tarinfo.
- """
- if hasattr(os, 'chmod'):
- try:
- os.chmod(targetpath, tarinfo.mode)
- except EnvironmentError as e:
- raise ExtractError("could not change mode")
-
- def utime(self, tarinfo, targetpath):
- """Set modification time of targetpath according to tarinfo.
- """
- if not hasattr(os, 'utime'):
- return
- try:
- os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
- except EnvironmentError as e:
- raise ExtractError("could not change modification time")
-
- #--------------------------------------------------------------------------
- def next(self):
- """Return the next member of the archive as a TarInfo object, when
- TarFile is opened for reading. Return None if there is no more
- available.
- """
- self._check("ra")
- if self.firstmember is not None:
- m = self.firstmember
- self.firstmember = None
- return m
-
- # Read the next block.
- self.fileobj.seek(self.offset)
- tarinfo = None
- while True:
- try:
- tarinfo = self.tarinfo.fromtarfile(self)
- except EOFHeaderError as e:
- if self.ignore_zeros:
- self._dbg(2, "0x%X: %s" % (self.offset, e))
- self.offset += BLOCKSIZE
- continue
- except InvalidHeaderError as e:
- if self.ignore_zeros:
- self._dbg(2, "0x%X: %s" % (self.offset, e))
- self.offset += BLOCKSIZE
- continue
- elif self.offset == 0:
- raise ReadError(str(e))
- except EmptyHeaderError:
- if self.offset == 0:
- raise ReadError("empty file")
- except TruncatedHeaderError as e:
- if self.offset == 0:
- raise ReadError(str(e))
- except SubsequentHeaderError as e:
- raise ReadError(str(e))
- break
-
- if tarinfo is not None:
- self.members.append(tarinfo)
- else:
- self._loaded = True
-
- return tarinfo
-
- #--------------------------------------------------------------------------
- # Little helper methods:
-
- def _getmember(self, name, tarinfo=None, normalize=False):
- """Find an archive member by name from bottom to top.
- If tarinfo is given, it is used as the starting point.
- """
- # Ensure that all members have been loaded.
- members = self.getmembers()
-
- # Limit the member search list up to tarinfo.
- if tarinfo is not None:
- members = members[:members.index(tarinfo)]
-
- if normalize:
- name = os.path.normpath(name)
-
- for member in reversed(members):
- if normalize:
- member_name = os.path.normpath(member.name)
- else:
- member_name = member.name
-
- if name == member_name:
- return member
-
- def _load(self):
- """Read through the entire archive file and look for readable
- members.
- """
- while True:
- tarinfo = self.next()
- if tarinfo is None:
- break
- self._loaded = True
-
- def _check(self, mode=None):
- """Check if TarFile is still open, and if the operation's mode
- corresponds to TarFile's mode.
- """
- if self.closed:
- raise IOError("%s is closed" % self.__class__.__name__)
- if mode is not None and self.mode not in mode:
- raise IOError("bad operation for mode %r" % self.mode)
-
- def _find_link_target(self, tarinfo):
- """Find the target member of a symlink or hardlink member in the
- archive.
- """
- if tarinfo.issym():
- # Always search the entire archive.
- linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
- limit = None
- else:
- # Search the archive before the link, because a hard link is
- # just a reference to an already archived file.
- linkname = tarinfo.linkname
- limit = tarinfo
-
- member = self._getmember(linkname, tarinfo=limit, normalize=True)
- if member is None:
- raise KeyError("linkname %r not found" % linkname)
- return member
-
- def __iter__(self):
- """Provide an iterator object.
- """
- if self._loaded:
- return iter(self.members)
- else:
- return TarIter(self)
-
- def _dbg(self, level, msg):
- """Write debugging output to sys.stderr.
- """
- if level <= self.debug:
- print(msg, file=sys.stderr)
-
- def __enter__(self):
- self._check()
- return self
-
- def __exit__(self, type, value, traceback):
- if type is None:
- self.close()
- else:
- # An exception occurred. We must not call close() because
- # it would try to write end-of-archive blocks and padding.
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
-# class TarFile
-
-class TarIter(object):
- """Iterator Class.
-
- for tarinfo in TarFile(...):
- suite...
- """
-
- def __init__(self, tarfile):
- """Construct a TarIter object.
- """
- self.tarfile = tarfile
- self.index = 0
- def __iter__(self):
- """Return iterator object.
- """
- return self
-
- def __next__(self):
- """Return the next item using TarFile's next() method.
- When all members have been read, set TarFile as _loaded.
- """
- # Fix for SF #1100429: Under rare circumstances it can
- # happen that getmembers() is called during iteration,
- # which will cause TarIter to stop prematurely.
- if not self.tarfile._loaded:
- tarinfo = self.tarfile.next()
- if not tarinfo:
- self.tarfile._loaded = True
- raise StopIteration
- else:
- try:
- tarinfo = self.tarfile.members[self.index]
- except IndexError:
- raise StopIteration
- self.index += 1
- return tarinfo
-
- next = __next__ # for Python 2.x
-
-#--------------------
-# exported functions
-#--------------------
-def is_tarfile(name):
- """Return True if name points to a tar archive that we
- are able to handle, else return False.
- """
- try:
- t = open(name)
- t.close()
- return True
- except TarError:
- return False
-
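is_tarfile() simply attempts a full open and maps TarError to False, so it honors every compression scheme this module can read. A sketch; the paths are hypothetical, and a missing file still raises IOError because only TarError is caught:

```python
import tarfile

for path in ("backup.tar.gz", "README.txt"):
    try:
        print(path, tarfile.is_tarfile(path))
    except IOError:           # file absent or unreadable
        print(path, "cannot be read")
```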
-bltn_open = open # keep a reference to the builtin open()
-open = TarFile.open # from here on, module-level open() means TarFile.open()
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/compat.py b/env/Lib/site-packages/pip/_vendor/distlib/compat.py
deleted file mode 100644
index 2b198dd..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/compat.py
+++ /dev/null
@@ -1,1111 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2013-2016 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-from __future__ import absolute_import
-
-import os
-import re
-import sys
-
-try:
- import ssl
-except ImportError:
- ssl = None
-
-if sys.version_info[0] < 3: # pragma: no cover
- from StringIO import StringIO
- string_types = basestring,
- text_type = unicode
- from types import FileType as file_type
- import __builtin__ as builtins
- import ConfigParser as configparser
- from ._backport import shutil
- from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
- from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
- pathname2url, ContentTooShortError, splittype)
-
- def quote(s):
- if isinstance(s, unicode):
- s = s.encode('utf-8')
- return _quote(s)
-
- import urllib2
- from urllib2 import (Request, urlopen, URLError, HTTPError,
- HTTPBasicAuthHandler, HTTPPasswordMgr,
- HTTPHandler, HTTPRedirectHandler,
- build_opener)
- if ssl:
- from urllib2 import HTTPSHandler
- import httplib
- import xmlrpclib
- import Queue as queue
- from HTMLParser import HTMLParser
- import htmlentitydefs
- raw_input = raw_input
- from itertools import ifilter as filter
- from itertools import ifilterfalse as filterfalse
-
- _userprog = None
- def splituser(host):
- """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
- global _userprog
- if _userprog is None:
- import re
- _userprog = re.compile('^(.*)@(.*)$')
-
- match = _userprog.match(host)
- if match: return match.group(1, 2)
- return None, host
-
-else: # pragma: no cover
- from io import StringIO
- string_types = str,
- text_type = str
- from io import TextIOWrapper as file_type
- import builtins
- import configparser
- import shutil
- from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
- unquote, urlsplit, urlunsplit, splittype)
- from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
- pathname2url,
- HTTPBasicAuthHandler, HTTPPasswordMgr,
- HTTPHandler, HTTPRedirectHandler,
- build_opener)
- if ssl:
- from urllib.request import HTTPSHandler
- from urllib.error import HTTPError, URLError, ContentTooShortError
- import http.client as httplib
- import urllib.request as urllib2
- import xmlrpc.client as xmlrpclib
- import queue
- from html.parser import HTMLParser
- import html.entities as htmlentitydefs
- raw_input = input
- from itertools import filterfalse
- filter = filter
-
-try:
- from ssl import match_hostname, CertificateError
-except ImportError: # pragma: no cover
- class CertificateError(ValueError):
- pass
-
-
- def _dnsname_match(dn, hostname, max_wildcards=1):
- """Matching according to RFC 6125, section 6.4.3
-
- http://tools.ietf.org/html/rfc6125#section-6.4.3
- """
- pats = []
- if not dn:
- return False
-
- parts = dn.split('.')
- leftmost, remainder = parts[0], parts[1:]
-
- wildcards = leftmost.count('*')
- if wildcards > max_wildcards:
- # Issue #17980: avoid denials of service by refusing more
- # than one wildcard per fragment. A survey of established
- # policy among SSL implementations showed it to be a
- # reasonable choice.
- raise CertificateError(
- "too many wildcards in certificate DNS name: " + repr(dn))
-
- # speed up common case w/o wildcards
- if not wildcards:
- return dn.lower() == hostname.lower()
-
- # RFC 6125, section 6.4.3, subitem 1.
- # The client SHOULD NOT attempt to match a presented identifier in which
- # the wildcard character comprises a label other than the left-most label.
- if leftmost == '*':
- # When '*' is a fragment by itself, it matches a non-empty dotless
- # fragment.
- pats.append('[^.]+')
- elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
- # RFC 6125, section 6.4.3, subitem 3.
- # The client SHOULD NOT attempt to match a presented identifier
- # where the wildcard character is embedded within an A-label or
- # U-label of an internationalized domain name.
- pats.append(re.escape(leftmost))
- else:
- # Otherwise, '*' matches any dotless string, e.g. www*
- pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
-
- # add the remaining fragments, ignore any wildcards
- for frag in remainder:
- pats.append(re.escape(frag))
-
- pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
- return pat.match(hostname)
-
-
- def match_hostname(cert, hostname):
- """Verify that *cert* (in decoded format as returned by
- SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
- rules are followed, but IP addresses are not accepted for *hostname*.
-
- CertificateError is raised on failure. On success, the function
- returns nothing.
- """
- if not cert:
- raise ValueError("empty or no certificate, match_hostname needs a "
- "SSL socket or SSL context with either "
- "CERT_OPTIONAL or CERT_REQUIRED")
- dnsnames = []
- san = cert.get('subjectAltName', ())
- for key, value in san:
- if key == 'DNS':
- if _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- if not dnsnames:
- # The subject is only checked when there is no dNSName entry
- # in subjectAltName
- for sub in cert.get('subject', ()):
- for key, value in sub:
- # XXX according to RFC 2818, the most specific Common Name
- # must be used.
- if key == 'commonName':
- if _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- if len(dnsnames) > 1:
- raise CertificateError("hostname %r "
- "doesn't match either of %s"
- % (hostname, ', '.join(map(repr, dnsnames))))
- elif len(dnsnames) == 1:
- raise CertificateError("hostname %r "
- "doesn't match %r"
- % (hostname, dnsnames[0]))
- else:
- raise CertificateError("no appropriate commonName or "
- "subjectAltName fields were found")
-
-
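A sketch of the fallback in action, assuming the match_hostname() and CertificateError defined above are in scope; the certificate dict is fabricated in the shape SSLSocket.getpeercert() returns:

```python
cert = {
    'subject': ((('commonName', 'example.com'),),),
    'subjectAltName': (('DNS', 'example.com'), ('DNS', '*.example.net')),
}

match_hostname(cert, 'www.example.net')      # wildcard SAN matches: returns None
try:
    match_hostname(cert, 'www.example.org')  # no SAN matches
except CertificateError as exc:
    print('rejected:', exc)
```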
-try:
- from types import SimpleNamespace as Container
-except ImportError: # pragma: no cover
- class Container(object):
- """
- A generic container for when multiple values need to be returned
- """
- def __init__(self, **kwargs):
- self.__dict__.update(kwargs)
-
-
-try:
- from shutil import which
-except ImportError: # pragma: no cover
- # Implementation from Python 3.3
- def which(cmd, mode=os.F_OK | os.X_OK, path=None):
- """Given a command, mode, and a PATH string, return the path which
- conforms to the given mode on the PATH, or None if there is no such
- file.
-
- `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
- of os.environ.get("PATH"), or can be overridden with a custom search
- path.
-
- """
- # Check that a given file can be accessed with the correct mode.
- # Additionally check that `file` is not a directory, as on Windows
- # directories pass the os.access check.
- def _access_check(fn, mode):
- return (os.path.exists(fn) and os.access(fn, mode)
- and not os.path.isdir(fn))
-
- # If we're given a path with a directory part, look it up directly rather
- # than referring to PATH directories. This includes checking relative to the
- # current directory, e.g. ./script
- if os.path.dirname(cmd):
- if _access_check(cmd, mode):
- return cmd
- return None
-
- if path is None:
- path = os.environ.get("PATH", os.defpath)
- if not path:
- return None
- path = path.split(os.pathsep)
-
- if sys.platform == "win32":
- # The current directory takes precedence on Windows.
- if os.curdir not in path:
- path.insert(0, os.curdir)
-
- # PATHEXT is necessary to check on Windows.
- pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
- # See if the given file matches any of the expected path extensions.
- # This will allow us to short circuit when given "python.exe".
- # If it does match, only test that one, otherwise we have to try
- # others.
- if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
- files = [cmd]
- else:
- files = [cmd + ext for ext in pathext]
- else:
- # On other platforms you don't have things like PATHEXT to tell you
- # what file suffixes are executable, so just pass on cmd as-is.
- files = [cmd]
-
- seen = set()
- for dir in path:
- normdir = os.path.normcase(dir)
- if normdir not in seen:
- seen.add(normdir)
- for thefile in files:
- name = os.path.join(dir, thefile)
- if _access_check(name, mode):
- return name
- return None
-
-
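Usage is identical either way, since the fallback reproduces shutil.which() from Python 3.3. A sketch:

```python
from shutil import which  # the stdlib original; the code above is its backport

print(which("sh"))                        # e.g. "/bin/sh" on POSIX
print(which("no-such-tool"))              # None when nothing on PATH matches
print(which("sh", path="/usr/bin:/bin"))  # restrict the search path (POSIX)
```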
-# ZipFile is a context manager in 2.7, but not in 2.6
-
-from zipfile import ZipFile as BaseZipFile
-
-if hasattr(BaseZipFile, '__enter__'): # pragma: no cover
- ZipFile = BaseZipFile
-else:
- from zipfile import ZipExtFile as BaseZipExtFile
-
- class ZipExtFile(BaseZipExtFile):
- def __init__(self, base):
- self.__dict__.update(base.__dict__)
-
- def __enter__(self):
- return self
-
- def __exit__(self, *exc_info):
- self.close()
- # return None, so if an exception occurred, it will propagate
-
- class ZipFile(BaseZipFile):
- def __enter__(self):
- return self
-
- def __exit__(self, *exc_info):
- self.close()
- # return None, so if an exception occurred, it will propagate
-
- def open(self, *args, **kwargs):
- base = BaseZipFile.open(self, *args, **kwargs)
- return ZipExtFile(base)
-
-try:
- from platform import python_implementation
-except ImportError: # pragma: no cover
- def python_implementation():
- """Return a string identifying the Python implementation."""
- if 'PyPy' in sys.version:
- return 'PyPy'
- if os.name == 'java':
- return 'Jython'
- if sys.version.startswith('IronPython'):
- return 'IronPython'
- return 'CPython'
-
-try:
- import sysconfig
-except ImportError: # pragma: no cover
- from ._backport import sysconfig
-
-try:
- callable = callable
-except NameError: # pragma: no cover
- from collections import Callable
-
- def callable(obj):
- return isinstance(obj, Callable)
-
-
-try:
- fsencode = os.fsencode
- fsdecode = os.fsdecode
-except AttributeError: # pragma: no cover
- _fsencoding = sys.getfilesystemencoding()
- if _fsencoding == 'mbcs':
- _fserrors = 'strict'
- else:
- _fserrors = 'surrogateescape'
-
- def fsencode(filename):
- if isinstance(filename, bytes):
- return filename
- elif isinstance(filename, text_type):
- return filename.encode(_fsencoding, _fserrors)
- else:
- raise TypeError("expect bytes or str, not %s" %
- type(filename).__name__)
-
- def fsdecode(filename):
- if isinstance(filename, text_type):
- return filename
- elif isinstance(filename, bytes):
- return filename.decode(_fsencoding, _fserrors)
- else:
- raise TypeError("expect bytes or str, not %s" %
- type(filename).__name__)
-
-try:
- from tokenize import detect_encoding
-except ImportError: # pragma: no cover
- from codecs import BOM_UTF8, lookup
- import re
-
- cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
-
- def _get_normal_name(orig_enc):
- """Imitates get_normal_name in tokenizer.c."""
- # Only care about the first 12 characters.
- enc = orig_enc[:12].lower().replace("_", "-")
- if enc == "utf-8" or enc.startswith("utf-8-"):
- return "utf-8"
- if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
- enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
- return "iso-8859-1"
- return orig_enc
-
- def detect_encoding(readline):
- """
- The detect_encoding() function is used to detect the encoding that should
- be used to decode a Python source file. It requires one argument, readline,
- in the same way as the tokenize() generator.
-
- It will call readline a maximum of twice, and return the encoding used
- (as a string) and a list of any lines (left as bytes) it has read in.
-
- It detects the encoding from the presence of a utf-8 bom or an encoding
- cookie as specified in pep-0263. If both a bom and a cookie are present,
- but disagree, a SyntaxError will be raised. If the encoding cookie is an
- invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
- 'utf-8-sig' is returned.
-
- If no encoding is specified, then the default of 'utf-8' will be returned.
- """
- try:
- filename = readline.__self__.name
- except AttributeError:
- filename = None
- bom_found = False
- encoding = None
- default = 'utf-8'
- def read_or_stop():
- try:
- return readline()
- except StopIteration:
- return b''
-
- def find_cookie(line):
- try:
- # Decode as UTF-8. Either the line is an encoding declaration,
- # in which case it should be pure ASCII, or it must be UTF-8
- # per default encoding.
- line_string = line.decode('utf-8')
- except UnicodeDecodeError:
- msg = "invalid or missing encoding declaration"
- if filename is not None:
- msg = '{} for {!r}'.format(msg, filename)
- raise SyntaxError(msg)
-
- matches = cookie_re.findall(line_string)
- if not matches:
- return None
- encoding = _get_normal_name(matches[0])
- try:
- codec = lookup(encoding)
- except LookupError:
- # This behaviour mimics the Python interpreter
- if filename is None:
- msg = "unknown encoding: " + encoding
- else:
- msg = "unknown encoding for {!r}: {}".format(filename,
- encoding)
- raise SyntaxError(msg)
-
- if bom_found:
- if codec.name != 'utf-8':
- # This behaviour mimics the Python interpreter
- if filename is None:
- msg = 'encoding problem: utf-8'
- else:
- msg = 'encoding problem for {!r}: utf-8'.format(filename)
- raise SyntaxError(msg)
- encoding += '-sig'
- return encoding
-
- first = read_or_stop()
- if first.startswith(BOM_UTF8):
- bom_found = True
- first = first[3:]
- default = 'utf-8-sig'
- if not first:
- return default, []
-
- encoding = find_cookie(first)
- if encoding:
- return encoding, [first]
-
- second = read_or_stop()
- if not second:
- return default, [first]
-
- encoding = find_cookie(second)
- if encoding:
- return encoding, [first, second]
-
- return default, [first, second]
-
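A sketch of the two-line protocol, using tokenize.detect_encoding (the original that the fallback above re-implements): the function pulls at most two lines through `readline` and reports the declared encoding plus whatever it consumed:

```python
import io
from tokenize import detect_encoding

src = b"# -*- coding: latin-1 -*-\nx = 1\n"
encoding, consumed = detect_encoding(io.BytesIO(src).readline)
print(encoding)   # "iso-8859-1" (normalized from "latin-1")
print(consumed)   # the raw lines read while deciding
```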
-# For converting & <-> & etc.
-try:
- from html import escape
-except ImportError:
- from cgi import escape
-if sys.version_info[:2] < (3, 4):
- unescape = HTMLParser().unescape
-else:
- from html import unescape
-
-try:
- from collections import ChainMap
-except ImportError: # pragma: no cover
- from collections import MutableMapping
-
- try:
- from reprlib import recursive_repr as _recursive_repr
- except ImportError:
- try:
- from thread import get_ident # Python 2; used by wrapper() below
- except ImportError:
- from threading import get_ident # Python 3
-
- def _recursive_repr(fillvalue='...'):
- '''
- Decorator to make a repr function return fillvalue for a recursive
- call
- '''
-
- def decorating_function(user_function):
- repr_running = set()
-
- def wrapper(self):
- key = id(self), get_ident()
- if key in repr_running:
- return fillvalue
- repr_running.add(key)
- try:
- result = user_function(self)
- finally:
- repr_running.discard(key)
- return result
-
- # Can't use functools.wraps() here because of bootstrap issues
- wrapper.__module__ = getattr(user_function, '__module__')
- wrapper.__doc__ = getattr(user_function, '__doc__')
- wrapper.__name__ = getattr(user_function, '__name__')
- wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
- return wrapper
-
- return decorating_function
-
- class ChainMap(MutableMapping):
- ''' A ChainMap groups multiple dicts (or other mappings) together
- to create a single, updateable view.
-
- The underlying mappings are stored in a list. That list is public and can be
- accessed or updated using the *maps* attribute. There is no other state.
-
- Lookups search the underlying mappings successively until a key is found.
- In contrast, writes, updates, and deletions only operate on the first
- mapping.
-
- '''
-
- def __init__(self, *maps):
- '''Initialize a ChainMap by setting *maps* to the given mappings.
- If no mappings are provided, a single empty dictionary is used.
-
- '''
- self.maps = list(maps) or [{}] # always at least one map
-
- def __missing__(self, key):
- raise KeyError(key)
-
- def __getitem__(self, key):
- for mapping in self.maps:
- try:
- return mapping[key] # can't use 'key in mapping' with defaultdict
- except KeyError:
- pass
- return self.__missing__(key) # support subclasses that define __missing__
-
- def get(self, key, default=None):
- return self[key] if key in self else default
-
- def __len__(self):
- return len(set().union(*self.maps)) # reuses stored hash values if possible
-
- def __iter__(self):
- return iter(set().union(*self.maps))
-
- def __contains__(self, key):
- return any(key in m for m in self.maps)
-
- def __bool__(self):
- return any(self.maps)
-
- @_recursive_repr()
- def __repr__(self):
- return '{0.__class__.__name__}({1})'.format(
- self, ', '.join(map(repr, self.maps)))
-
- @classmethod
- def fromkeys(cls, iterable, *args):
- 'Create a ChainMap with a single dict created from the iterable.'
- return cls(dict.fromkeys(iterable, *args))
-
- def copy(self):
- 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
- return self.__class__(self.maps[0].copy(), *self.maps[1:])
-
- __copy__ = copy
-
- def new_child(self): # like Django's Context.push()
- 'New ChainMap with a new dict followed by all previous maps.'
- return self.__class__({}, *self.maps)
-
- @property
- def parents(self): # like Django's Context.pop()
- 'New ChainMap from maps[1:].'
- return self.__class__(*self.maps[1:])
-
- def __setitem__(self, key, value):
- self.maps[0][key] = value
-
- def __delitem__(self, key):
- try:
- del self.maps[0][key]
- except KeyError:
- raise KeyError('Key not found in the first mapping: {!r}'.format(key))
-
- def popitem(self):
- 'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
- try:
- return self.maps[0].popitem()
- except KeyError:
- raise KeyError('No keys found in the first mapping.')
-
- def pop(self, key, *args):
- 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
- try:
- return self.maps[0].pop(key, *args)
- except KeyError:
- raise KeyError('Key not found in the first mapping: {!r}'.format(key))
-
- def clear(self):
- 'Clear maps[0], leaving maps[1:] intact.'
- self.maps[0].clear()
-
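The backport keeps stdlib ChainMap semantics: lookups fall through the maps in order, while writes and deletions touch only maps[0]. A usage sketch with the stdlib class:

```python
from collections import ChainMap

defaults = {"color": "red", "user": "guest"}
overrides = {"user": "admin"}
cfg = ChainMap(overrides, defaults)

print(cfg["user"])          # "admin" -- found in the first map
print(cfg["color"])         # "red"   -- falls through to defaults
cfg["color"] = "blue"       # writes land in overrides only
print(defaults["color"])    # still "red"
print(cfg.parents["user"])  # "guest" -- view that skips maps[0]
```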
-try:
- from imp import cache_from_source
-except ImportError: # pragma: no cover
- def cache_from_source(path, debug_override=None):
- assert path.endswith('.py')
- if debug_override is None:
- debug_override = __debug__
- if debug_override:
- suffix = 'c'
- else:
- suffix = 'o'
- return path + suffix
-
-try:
- from collections import OrderedDict
-except ImportError: # pragma: no cover
-## {{{ http://code.activestate.com/recipes/576693/ (r9)
-# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
-# Passes Python2.7's test suite and incorporates all the latest updates.
- try:
- from thread import get_ident as _get_ident
- except ImportError:
- from dummy_thread import get_ident as _get_ident
-
- try:
- from _abcoll import KeysView, ValuesView, ItemsView
- except ImportError:
- pass
-
-
- class OrderedDict(dict):
- 'Dictionary that remembers insertion order'
- # An inherited dict maps keys to values.
- # The inherited dict provides __getitem__, __len__, __contains__, and get.
- # The remaining methods are order-aware.
- # Big-O running times for all methods are the same as for regular dictionaries.
-
- # The internal self.__map dictionary maps keys to links in a doubly linked list.
- # The circular doubly linked list starts and ends with a sentinel element.
- # The sentinel element never gets deleted (this simplifies the algorithm).
- # Each link is stored as a list of length three: [PREV, NEXT, KEY].
-
- def __init__(self, *args, **kwds):
- '''Initialize an ordered dictionary. Signature is the same as for
- regular dictionaries, but keyword arguments are not recommended
- because their insertion order is arbitrary.
-
- '''
- if len(args) > 1:
- raise TypeError('expected at most 1 argument, got %d' % len(args))
- try:
- self.__root
- except AttributeError:
- self.__root = root = [] # sentinel node
- root[:] = [root, root, None]
- self.__map = {}
- self.__update(*args, **kwds)
-
- def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
- 'od.__setitem__(i, y) <==> od[i]=y'
- # Setting a new item creates a new link which goes at the end of the linked
- # list, and the inherited dictionary is updated with the new key/value pair.
- if key not in self:
- root = self.__root
- last = root[0]
- last[1] = root[0] = self.__map[key] = [last, root, key]
- dict_setitem(self, key, value)
-
- def __delitem__(self, key, dict_delitem=dict.__delitem__):
- 'od.__delitem__(y) <==> del od[y]'
- # Deleting an existing item uses self.__map to find the link which is
- # then removed by updating the links in the predecessor and successor nodes.
- dict_delitem(self, key)
- link_prev, link_next, key = self.__map.pop(key)
- link_prev[1] = link_next
- link_next[0] = link_prev
-
- def __iter__(self):
- 'od.__iter__() <==> iter(od)'
- root = self.__root
- curr = root[1]
- while curr is not root:
- yield curr[2]
- curr = curr[1]
-
- def __reversed__(self):
- 'od.__reversed__() <==> reversed(od)'
- root = self.__root
- curr = root[0]
- while curr is not root:
- yield curr[2]
- curr = curr[0]
-
- def clear(self):
- 'od.clear() -> None. Remove all items from od.'
- try:
- for node in self.__map.itervalues():
- del node[:]
- root = self.__root
- root[:] = [root, root, None]
- self.__map.clear()
- except AttributeError:
- pass
- dict.clear(self)
-
- def popitem(self, last=True):
- '''od.popitem() -> (k, v), return and remove a (key, value) pair.
- Pairs are returned in LIFO order if last is true or FIFO order if false.
-
- '''
- if not self:
- raise KeyError('dictionary is empty')
- root = self.__root
- if last:
- link = root[0]
- link_prev = link[0]
- link_prev[1] = root
- root[0] = link_prev
- else:
- link = root[1]
- link_next = link[1]
- root[1] = link_next
- link_next[0] = root
- key = link[2]
- del self.__map[key]
- value = dict.pop(self, key)
- return key, value
-
- # -- the following methods do not depend on the internal structure --
-
- def keys(self):
- 'od.keys() -> list of keys in od'
- return list(self)
-
- def values(self):
- 'od.values() -> list of values in od'
- return [self[key] for key in self]
-
- def items(self):
- 'od.items() -> list of (key, value) pairs in od'
- return [(key, self[key]) for key in self]
-
- def iterkeys(self):
- 'od.iterkeys() -> an iterator over the keys in od'
- return iter(self)
-
- def itervalues(self):
- 'od.itervalues() -> an iterator over the values in od'
- for k in self:
- yield self[k]
-
- def iteritems(self):
- 'od.iteritems() -> an iterator over the (key, value) items in od'
- for k in self:
- yield (k, self[k])
-
- def update(*args, **kwds):
- '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
-
- If E is a dict instance, does: for k in E: od[k] = E[k]
- If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
- Or if E is an iterable of items, does: for k, v in E: od[k] = v
- In either case, this is followed by: for k, v in F.items(): od[k] = v
-
- '''
- if len(args) > 2:
- raise TypeError('update() takes at most 2 positional '
- 'arguments (%d given)' % (len(args),))
- elif not args:
- raise TypeError('update() takes at least 1 argument (0 given)')
- self = args[0]
- # Make progressively weaker assumptions about "other"
- other = ()
- if len(args) == 2:
- other = args[1]
- if isinstance(other, dict):
- for key in other:
- self[key] = other[key]
- elif hasattr(other, 'keys'):
- for key in other.keys():
- self[key] = other[key]
- else:
- for key, value in other:
- self[key] = value
- for key, value in kwds.items():
- self[key] = value
-
- __update = update # let subclasses override update without breaking __init__
-
- __marker = object()
-
- def pop(self, key, default=__marker):
- '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
- If key is not found, d is returned if given, otherwise KeyError is raised.
-
- '''
- if key in self:
- result = self[key]
- del self[key]
- return result
- if default is self.__marker:
- raise KeyError(key)
- return default
-
- def setdefault(self, key, default=None):
- 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
- if key in self:
- return self[key]
- self[key] = default
- return default
-
- def __repr__(self, _repr_running=None):
- 'od.__repr__() <==> repr(od)'
- if not _repr_running: _repr_running = {}
- call_key = id(self), _get_ident()
- if call_key in _repr_running:
- return '...'
- _repr_running[call_key] = 1
- try:
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
- finally:
- del _repr_running[call_key]
-
- def __reduce__(self):
- 'Return state information for pickling'
- items = [[k, self[k]] for k in self]
- inst_dict = vars(self).copy()
- for k in vars(OrderedDict()):
- inst_dict.pop(k, None)
- if inst_dict:
- return (self.__class__, (items,), inst_dict)
- return self.__class__, (items,)
-
- def copy(self):
- 'od.copy() -> a shallow copy of od'
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
- and values equal to v (which defaults to None).
-
- '''
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
- while comparison to a regular mapping is order-insensitive.
-
- '''
- if isinstance(other, OrderedDict):
- return len(self)==len(other) and self.items() == other.items()
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
-
- # -- the following methods are only used in Python 2.7 --
-
- def viewkeys(self):
- "od.viewkeys() -> a set-like object providing a view on od's keys"
- return KeysView(self)
-
- def viewvalues(self):
- "od.viewvalues() -> an object providing a view on od's values"
- return ValuesView(self)
-
- def viewitems(self):
- "od.viewitems() -> a set-like object providing a view on od's items"
- return ItemsView(self)
-
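The recipe preserves stdlib OrderedDict behaviour, including popitem()'s two ends. A sketch with the stdlib class it stands in for:

```python
from collections import OrderedDict

od = OrderedDict([("a", 1), ("b", 2), ("c", 3)])
print(list(od))                # ['a', 'b', 'c'] -- insertion order kept
print(od.popitem())            # ('c', 3): LIFO by default
print(od.popitem(last=False))  # ('a', 1): FIFO when last=False
```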
-try:
- from logging.config import BaseConfigurator, valid_ident
-except ImportError: # pragma: no cover
- IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
-
-
- def valid_ident(s):
- m = IDENTIFIER.match(s)
- if not m:
- raise ValueError('Not a valid Python identifier: %r' % s)
- return True
-
-
- # The ConvertingXXX classes are wrappers around standard Python containers,
- # and they serve to convert any suitable values in the container. The
- # conversion converts base dicts, lists and tuples to their wrapped
- # equivalents, whereas strings which match a conversion format are converted
- # appropriately.
- #
- # Each wrapper should have a configurator attribute holding the actual
- # configurator to use for conversion.
-
- class ConvertingDict(dict):
- """A converting dictionary wrapper."""
-
- def __getitem__(self, key):
- value = dict.__getitem__(self, key)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- def get(self, key, default=None):
- value = dict.get(self, key, default)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- def pop(self, key, default=None):
- value = dict.pop(self, key, default)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- class ConvertingList(list):
- """A converting list wrapper."""
- def __getitem__(self, key):
- value = list.__getitem__(self, key)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- def pop(self, idx=-1):
- value = list.pop(self, idx)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- return result
-
- class ConvertingTuple(tuple):
- """A converting tuple wrapper."""
- def __getitem__(self, key):
- value = tuple.__getitem__(self, key)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
-
- class BaseConfigurator(object):
- """
- The configurator base class which defines some useful defaults.
- """
-
- CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
-
- WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
- DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
- INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
- DIGIT_PATTERN = re.compile(r'^\d+$')
-
- value_converters = {
- 'ext' : 'ext_convert',
- 'cfg' : 'cfg_convert',
- }
-
- # We might want to use a different one, e.g. importlib
- importer = staticmethod(__import__)
-
- def __init__(self, config):
- self.config = ConvertingDict(config)
- self.config.configurator = self
-
- def resolve(self, s):
- """
- Resolve strings to objects using standard import and attribute
- syntax.
- """
- name = s.split('.')
- used = name.pop(0)
- try:
- found = self.importer(used)
- for frag in name:
- used += '.' + frag
- try:
- found = getattr(found, frag)
- except AttributeError:
- self.importer(used)
- found = getattr(found, frag)
- return found
- except ImportError:
- e, tb = sys.exc_info()[1:]
- v = ValueError('Cannot resolve %r: %s' % (s, e))
- v.__cause__, v.__traceback__ = e, tb
- raise v
-
- def ext_convert(self, value):
- """Default converter for the ext:// protocol."""
- return self.resolve(value)
-
- def cfg_convert(self, value):
- """Default converter for the cfg:// protocol."""
- rest = value
- m = self.WORD_PATTERN.match(rest)
- if m is None:
- raise ValueError("Unable to convert %r" % value)
- else:
- rest = rest[m.end():]
- d = self.config[m.groups()[0]]
- #print d, rest
- while rest:
- m = self.DOT_PATTERN.match(rest)
- if m:
- d = d[m.groups()[0]]
- else:
- m = self.INDEX_PATTERN.match(rest)
- if m:
- idx = m.groups()[0]
- if not self.DIGIT_PATTERN.match(idx):
- d = d[idx]
- else:
- try:
- n = int(idx) # try as number first (most likely)
- d = d[n]
- except TypeError:
- d = d[idx]
- if m:
- rest = rest[m.end():]
- else:
- raise ValueError('Unable to convert '
- '%r at %r' % (value, rest))
- #rest should be empty
- return d
-
- def convert(self, value):
- """
- Convert values to an appropriate type. dicts, lists and tuples are
- replaced by their converting alternatives. Strings are checked to
- see if they have a conversion format and are converted if they do.
- """
- if not isinstance(value, ConvertingDict) and isinstance(value, dict):
- value = ConvertingDict(value)
- value.configurator = self
- elif not isinstance(value, ConvertingList) and isinstance(value, list):
- value = ConvertingList(value)
- value.configurator = self
- elif not isinstance(value, ConvertingTuple) and\
- isinstance(value, tuple):
- value = ConvertingTuple(value)
- value.configurator = self
- elif isinstance(value, string_types):
- m = self.CONVERT_PATTERN.match(value)
- if m:
- d = m.groupdict()
- prefix = d['prefix']
- converter = self.value_converters.get(prefix, None)
- if converter:
- suffix = d['suffix']
- converter = getattr(self, converter)
- value = converter(suffix)
- return value
-
- def configure_custom(self, config):
- """Configure an object with a user-supplied factory."""
- c = config.pop('()')
- if not callable(c):
- c = self.resolve(c)
- props = config.pop('.', None)
- # Check for valid identifiers
- kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
- result = c(**kwargs)
- if props:
- for name, value in props.items():
- setattr(result, name, value)
- return result
-
- def as_tuple(self, value):
- """Utility function which converts lists to tuples."""
- if isinstance(value, list):
- value = tuple(value)
- return value
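A sketch of the converters at work, assuming the BaseConfigurator defined above is in scope (logging.config ships the original); the config values here are invented for illustration:

```python
cfg = BaseConfigurator({
    "handlers": {"file": {"filename": "app.log"}},
    "names": ["first", "second"],
})
print(cfg.cfg_convert("handlers.file.filename"))  # dotted access -> "app.log"
print(cfg.cfg_convert("names[1]"))                # index access  -> "second"
print(cfg.convert("ext://os.path.sep"))           # ext:// resolves an import
```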
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/database.py b/env/Lib/site-packages/pip/_vendor/distlib/database.py
deleted file mode 100644
index c314426..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/database.py
+++ /dev/null
@@ -1,1312 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012-2016 The Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""PEP 376 implementation."""
-
-from __future__ import unicode_literals
-
-import base64
-import codecs
-import contextlib
-import hashlib
-import logging
-import os
-import posixpath
-import sys
-import zipimport
-
-from . import DistlibException, resources
-from .compat import StringIO
-from .version import get_scheme, UnsupportedVersionError
-from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
-from .util import (parse_requirement, cached_property, parse_name_and_version,
- read_exports, write_exports, CSVReader, CSVWriter)
-
-
-__all__ = ['Distribution', 'BaseInstalledDistribution',
- 'InstalledDistribution', 'EggInfoDistribution',
- 'DistributionPath']
-
-
-logger = logging.getLogger(__name__)
-
-EXPORTS_FILENAME = 'pydist-exports.json'
-COMMANDS_FILENAME = 'pydist-commands.json'
-
-DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
- 'RESOURCES', EXPORTS_FILENAME, 'SHARED')
-
-DISTINFO_EXT = '.dist-info'
-
-
-class _Cache(object):
- """
- A simple cache mapping names and .dist-info paths to distributions
- """
- def __init__(self):
- """
- Initialise an instance. There is normally one for each DistributionPath.
- """
- self.name = {}
- self.path = {}
- self.generated = False
-
- def clear(self):
- """
- Clear the cache, setting it to its initial state.
- """
- self.name.clear()
- self.path.clear()
- self.generated = False
-
- def add(self, dist):
- """
- Add a distribution to the cache.
- :param dist: The distribution to add.
- """
- if dist.path not in self.path:
- self.path[dist.path] = dist
- self.name.setdefault(dist.key, []).append(dist)
-
-
-class DistributionPath(object):
- """
- Represents a set of distributions installed on a path (typically sys.path).
- """
- def __init__(self, path=None, include_egg=False):
- """
- Create an instance from a path, optionally including legacy (distutils/
- setuptools/distribute) distributions.
- :param path: The path to use, as a list of directories. If not specified,
- sys.path is used.
- :param include_egg: If True, this instance will look for and return legacy
- distributions as well as those based on PEP 376.
- """
- if path is None:
- path = sys.path
- self.path = path
- self._include_dist = True
- self._include_egg = include_egg
-
- self._cache = _Cache()
- self._cache_egg = _Cache()
- self._cache_enabled = True
- self._scheme = get_scheme('default')
-
- def _get_cache_enabled(self):
- return self._cache_enabled
-
- def _set_cache_enabled(self, value):
- self._cache_enabled = value
-
- cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
-
- def clear_cache(self):
- """
- Clears the internal cache.
- """
- self._cache.clear()
- self._cache_egg.clear()
-
-
- def _yield_distributions(self):
- """
- Yield .dist-info and/or .egg(-info) distributions.
- """
- # We need to check if we've seen some resources already, because on
- # some Linux systems (e.g. some Debian/Ubuntu variants) there are
- # symlinks which alias other files in the environment.
- seen = set()
- for path in self.path:
- finder = resources.finder_for_path(path)
- if finder is None:
- continue
- r = finder.find('')
- if not r or not r.is_container:
- continue
- rset = sorted(r.resources)
- for entry in rset:
- r = finder.find(entry)
- if not r or r.path in seen:
- continue
- if self._include_dist and entry.endswith(DISTINFO_EXT):
- possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
- for metadata_filename in possible_filenames:
- metadata_path = posixpath.join(entry, metadata_filename)
- pydist = finder.find(metadata_path)
- if pydist:
- break
- else:
- continue
-
- with contextlib.closing(pydist.as_stream()) as stream:
- metadata = Metadata(fileobj=stream, scheme='legacy')
- logger.debug('Found %s', r.path)
- seen.add(r.path)
- yield new_dist_class(r.path, metadata=metadata,
- env=self)
- elif self._include_egg and entry.endswith(('.egg-info',
- '.egg')):
- logger.debug('Found %s', r.path)
- seen.add(r.path)
- yield old_dist_class(r.path, self)
-
- def _generate_cache(self):
- """
- Scan the path for distributions and populate the cache with
- those that are found.
- """
- gen_dist = not self._cache.generated
- gen_egg = self._include_egg and not self._cache_egg.generated
- if gen_dist or gen_egg:
- for dist in self._yield_distributions():
- if isinstance(dist, InstalledDistribution):
- self._cache.add(dist)
- else:
- self._cache_egg.add(dist)
-
- if gen_dist:
- self._cache.generated = True
- if gen_egg:
- self._cache_egg.generated = True
-
- @classmethod
- def distinfo_dirname(cls, name, version):
- """
- The *name* and *version* parameters are converted into their
- filename-escaped form, i.e. any ``'-'`` characters are replaced
- with ``'_'`` other than the one in ``'dist-info'`` and the one
- separating the name from the version number.
-
- :parameter name: is converted to a standard distribution name by replacing
- any runs of non-alphanumeric characters with a single
- ``'-'``.
- :type name: string
- :parameter version: is converted to a standard version string. Spaces
- become dots, and all other non-alphanumeric characters
- (except dots) become dashes, with runs of multiple
- dashes condensed to a single dash.
- :type version: string
- :returns: directory name
- :rtype: string"""
- name = name.replace('-', '_')
- return '-'.join([name, version]) + DISTINFO_EXT
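- # Editor's note (not in the original source): the conversion is purely
- # lexical, e.g.
- # DistributionPath.distinfo_dirname('pip', '9.0.1') -> 'pip-9.0.1.dist-info'
- # DistributionPath.distinfo_dirname('foo-bar', '1.0') -> 'foo_bar-1.0.dist-info'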
-
- def get_distributions(self):
- """
- Provides an iterator that looks for distributions and returns
- :class:`InstalledDistribution` or
- :class:`EggInfoDistribution` instances for each one of them.
-
- :rtype: iterator of :class:`InstalledDistribution` and
- :class:`EggInfoDistribution` instances
- """
- if not self._cache_enabled:
- for dist in self._yield_distributions():
- yield dist
- else:
- self._generate_cache()
-
- for dist in self._cache.path.values():
- yield dist
-
- if self._include_egg:
- for dist in self._cache_egg.path.values():
- yield dist
-
- def get_distribution(self, name):
- """
- Looks for a named distribution on the path.
-
- This function only returns the first result found, as no more than one
- value is expected. If nothing is found, ``None`` is returned.
-
- :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
- or ``None``
- """
- result = None
- name = name.lower()
- if not self._cache_enabled:
- for dist in self._yield_distributions():
- if dist.key == name:
- result = dist
- break
- else:
- self._generate_cache()
-
- if name in self._cache.name:
- result = self._cache.name[name][0]
- elif self._include_egg and name in self._cache_egg.name:
- result = self._cache_egg.name[name][0]
- return result
-
- def provides_distribution(self, name, version=None):
- """
- Iterates over all distributions to find which distributions provide *name*.
- If a *version* is provided, it will be used to filter the results.
-
- This is a generator which yields every distribution on the path that
- provides a matching *name* (and, if given, a matching *version*).
-
- :parameter version: a version specifier that indicates the version
- required, conforming to the format in ``PEP-345``
-
- :type name: string
- :type version: string
- """
- matcher = None
- if version is not None:
- try:
- matcher = self._scheme.matcher('%s (%s)' % (name, version))
- except ValueError:
- raise DistlibException('invalid name or version: %r, %r' %
- (name, version))
-
- for dist in self.get_distributions():
- provided = dist.provides
-
- for p in provided:
- p_name, p_ver = parse_name_and_version(p)
- if matcher is None:
- if p_name == name:
- yield dist
- break
- else:
- if p_name == name and matcher.match(p_ver):
- yield dist
- break
-
- def get_file_path(self, name, relative_path):
- """
- Return the path to a resource file.
- """
- dist = self.get_distribution(name)
- if dist is None:
- raise LookupError('no distribution named %r found' % name)
- return dist.get_resource_path(relative_path)
-
- def get_exported_entries(self, category, name=None):
- """
- Return all of the exported entries in a particular category.
-
- :param category: The category to search for entries.
- :param name: If specified, only entries with that name are returned.
- """
- for dist in self.get_distributions():
- r = dist.exports
- if category in r:
- d = r[category]
- if name is not None:
- if name in d:
- yield d[name]
- else:
- for v in d.values():
- yield v
-
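-# Editor's sketch (not part of the original module): typical use of the
-# DistributionPath class above to enumerate installed distributions.
-# The code is illustrative and assumes distlib is importable.
-#
-# from distlib.database import DistributionPath
-# dist_path = DistributionPath(include_egg=True)
-# for dist in dist_path.get_distributions():
-#     print('%s %s' % (dist.name, dist.version))
-# pip_dist = dist_path.get_distribution('pip')
-# if pip_dist is not None:
-#     print(pip_dist.name_and_version)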
-
-class Distribution(object):
- """
- A base class for distributions, whether installed or from indexes.
- Either way, it must have some metadata, so that's all that's needed
- for construction.
- """
-
- build_time_dependency = False
- """
- Set to True if it's known to be only a build-time dependency (i.e.
- not needed after installation).
- """
-
- requested = False
- """A boolean that indicates whether the ``REQUESTED`` metadata file is
- present (in other words, whether the package was installed by user
- request or it was installed as a dependency)."""
-
- def __init__(self, metadata):
- """
- Initialise an instance.
- :param metadata: The instance of :class:`Metadata` describing this
- distribution.
- """
- self.metadata = metadata
- self.name = metadata.name
- self.key = self.name.lower() # for case-insensitive comparisons
- self.version = metadata.version
- self.locator = None
- self.digest = None
- self.extras = None # additional features requested
- self.context = None # environment marker overrides
- self.download_urls = set()
- self.digests = {}
-
- @property
- def source_url(self):
- """
- The source archive download URL for this distribution.
- """
- return self.metadata.source_url
-
- download_url = source_url # Backward compatibility
-
- @property
- def name_and_version(self):
- """
- A utility property which displays the name and version in parentheses.
- """
- return '%s (%s)' % (self.name, self.version)
-
- @property
- def provides(self):
- """
- A set of distribution names and versions provided by this distribution.
- :return: A set of "name (version)" strings.
- """
- plist = self.metadata.provides
- s = '%s (%s)' % (self.name, self.version)
- if s not in plist:
- plist.append(s)
- return plist
-
- def _get_requirements(self, req_attr):
- md = self.metadata
- logger.debug('Getting requirements from metadata %r', md.todict())
- reqts = getattr(md, req_attr)
- return set(md.get_requirements(reqts, extras=self.extras,
- env=self.context))
-
- @property
- def run_requires(self):
- return self._get_requirements('run_requires')
-
- @property
- def meta_requires(self):
- return self._get_requirements('meta_requires')
-
- @property
- def build_requires(self):
- return self._get_requirements('build_requires')
-
- @property
- def test_requires(self):
- return self._get_requirements('test_requires')
-
- @property
- def dev_requires(self):
- return self._get_requirements('dev_requires')
-
- def matches_requirement(self, req):
- """
- Say if this instance matches (fulfills) a requirement.
- :param req: The requirement to match.
- :type req: str
- :return: True if it matches, else False.
- """
- # Requirement may contain extras - parse to lose those
- # from what's passed to the matcher
- r = parse_requirement(req)
- scheme = get_scheme(self.metadata.scheme)
- try:
- matcher = scheme.matcher(r.requirement)
- except UnsupportedVersionError:
- # XXX compat-mode if cannot read the version
- logger.warning('could not read version %r - using name only',
- req)
- name = req.split()[0]
- matcher = scheme.matcher(name)
-
- name = matcher.key # case-insensitive
-
- result = False
- for p in self.provides:
- p_name, p_ver = parse_name_and_version(p)
- if p_name != name:
- continue
- try:
- result = matcher.match(p_ver)
- break
- except UnsupportedVersionError:
- pass
- return result
-
- def __repr__(self):
- """
- Return a textual representation of this instance.
- """
- if self.source_url:
- suffix = ' [%s]' % self.source_url
- else:
- suffix = ''
- return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
-
- def __eq__(self, other):
- """
- See if this distribution is the same as another.
- :param other: The distribution to compare with. To be equal to one
- another, distributions must have the same type, name,
- version and source_url.
- :return: True if it is the same, else False.
- """
- if type(other) is not type(self):
- result = False
- else:
- result = (self.name == other.name and
- self.version == other.version and
- self.source_url == other.source_url)
- return result
-
- def __hash__(self):
- """
- Compute hash in a way which matches the equality test.
- """
- return hash(self.name) + hash(self.version) + hash(self.source_url)
-
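-# Editor's sketch (not in the original source): how provides and
-# matches_requirement on the Distribution class above interact; make_dist
-# (defined at the end of this module) is used purely for illustration.
-#
-# dist = make_dist('requests', '2.0.0')
-# 'requests (2.0.0)' in dist.provides            # True
-# dist.matches_requirement('requests (>= 1.0)')  # True
-# dist.matches_requirement('requests (< 2.0)')   # False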
-
-class BaseInstalledDistribution(Distribution):
- """
- This is the base class for installed distributions (whether PEP 376 or
- legacy).
- """
-
- hasher = None
-
- def __init__(self, metadata, path, env=None):
- """
- Initialise an instance.
- :param metadata: An instance of :class:`Metadata` which describes the
- distribution. This will normally have been initialised
- from a metadata file in the ``path``.
- :param path: The path of the ``.dist-info`` or ``.egg-info``
- directory for the distribution.
- :param env: This is normally the :class:`DistributionPath`
- instance where this distribution was found.
- """
- super(BaseInstalledDistribution, self).__init__(metadata)
- self.path = path
- self.dist_path = env
-
- def get_hash(self, data, hasher=None):
- """
- Get the hash of some data, using a particular hash algorithm, if
- specified.
-
- :param data: The data to be hashed.
- :type data: bytes
- :param hasher: The name of a hash implementation, supported by hashlib,
- or ``None``. Examples of valid values are ``'sha1'``,
- ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
- ``'sha512'``. If no hasher is specified, the ``hasher``
- attribute of the :class:`InstalledDistribution` instance
- is used. If the hasher is determined to be ``None``, MD5
- is used as the hashing algorithm.
- :returns: The hash of the data. If a hasher was explicitly specified,
- the returned hash will be prefixed with the specified hasher
- followed by '='.
- :rtype: str
- """
- if hasher is None:
- hasher = self.hasher
- if hasher is None:
- hasher = hashlib.md5
- prefix = ''
- else:
- prefix = '%s=' % hasher
- hasher = getattr(hashlib, hasher)
- digest = hasher(data).digest()
- digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
- return '%s%s' % (prefix, digest)
-
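-# Editor's note (not in the original source): a sketch of get_hash above.
-# With an explicit hasher name the digest is prefixed with that name;
-# with no hasher set (as on this base class) MD5 is used, unprefixed.
-#
-# dist.get_hash(b'data', 'sha256')  # 'sha256=<urlsafe-base64-digest>'
-# dist.get_hash(b'data')            # '<urlsafe-base64-digest>' (MD5)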
-
-class InstalledDistribution(BaseInstalledDistribution):
- """
- Created with the *path* of the ``.dist-info`` directory provided to the
- constructor. It reads the metadata contained in ``pydist.json`` when it is
- instantiated, or uses a passed-in Metadata instance (useful when
- dry-run mode is being used).
- """
-
- hasher = 'sha256'
-
- def __init__(self, path, metadata=None, env=None):
- self.finder = finder = resources.finder_for_path(path)
- if finder is None:
- raise ValueError('finder unavailable for %s' % path)
- if env and env._cache_enabled and path in env._cache.path:
- metadata = env._cache.path[path].metadata
- elif metadata is None:
- r = finder.find(METADATA_FILENAME)
- # Temporary - for Wheel 0.23 support
- if r is None:
- r = finder.find(WHEEL_METADATA_FILENAME)
- # Temporary - for legacy support
- if r is None:
- r = finder.find('METADATA')
- if r is None:
- raise ValueError('no %s found in %s' % (METADATA_FILENAME,
- path))
- with contextlib.closing(r.as_stream()) as stream:
- metadata = Metadata(fileobj=stream, scheme='legacy')
-
- super(InstalledDistribution, self).__init__(metadata, path, env)
-
- if env and env._cache_enabled:
- env._cache.add(self)
-
- # finder cannot be None here: a missing finder raises ValueError above
- r = finder.find('REQUESTED')
- self.requested = r is not None
-
- def __repr__(self):
- return '<InstalledDistribution %r %s at %r>' % (
- self.name, self.version, self.path)
-
- def __str__(self):
- return "%s %s" % (self.name, self.version)
-
- def _get_records(self):
- """
- Get the list of installed files for the distribution
- :return: A list of tuples of path, hash and size. Note that hash and
- size might be ``None`` for some entries. The path is exactly
- as stored in the file (which is as in PEP 376).
- """
- results = []
- r = self.get_distinfo_resource('RECORD')
- with contextlib.closing(r.as_stream()) as stream:
- with CSVReader(stream=stream) as record_reader:
- # Base location is parent dir of .dist-info dir
- #base_location = os.path.dirname(self.path)
- #base_location = os.path.abspath(base_location)
- for row in record_reader:
- missing = [None for i in range(len(row), 3)]
- path, checksum, size = row + missing
- #if not os.path.isabs(path):
- # path = path.replace('/', os.sep)
- # path = os.path.join(base_location, path)
- results.append((path, checksum, size))
- return results
-
- @cached_property
- def exports(self):
- """
- Return the information exported by this distribution.
- :return: A dictionary of exports, mapping an export category to a dict
- of :class:`ExportEntry` instances describing the individual
- export entries, and keyed by name.
- """
- result = {}
- r = self.get_distinfo_resource(EXPORTS_FILENAME)
- if r:
- result = self.read_exports()
- return result
-
- def read_exports(self):
- """
- Read exports data from a file in .ini format.
-
- :return: A dictionary of exports, mapping an export category to a list
- of :class:`ExportEntry` instances describing the individual
- export entries.
- """
- result = {}
- r = self.get_distinfo_resource(EXPORTS_FILENAME)
- if r:
- with contextlib.closing(r.as_stream()) as stream:
- result = read_exports(stream)
- return result
-
- def write_exports(self, exports):
- """
- Write a dictionary of exports to a file in .ini format.
- :param exports: A dictionary of exports, mapping an export category to
- a list of :class:`ExportEntry` instances describing the
- individual export entries.
- """
- rf = self.get_distinfo_file(EXPORTS_FILENAME)
- with open(rf, 'w') as f:
- write_exports(exports, f)
-
- def get_resource_path(self, relative_path):
- """
- NOTE: This API may change in the future.
-
- Return the absolute path to a resource file with the given relative
- path.
-
- :param relative_path: The path, relative to .dist-info, of the resource
- of interest.
- :return: The absolute path where the resource is to be found.
- """
- r = self.get_distinfo_resource('RESOURCES')
- with contextlib.closing(r.as_stream()) as stream:
- with CSVReader(stream=stream) as resources_reader:
- for relative, destination in resources_reader:
- if relative == relative_path:
- return destination
- raise KeyError('no resource file with relative path %r '
- 'is installed' % relative_path)
-
- def list_installed_files(self):
- """
- Iterates over the ``RECORD`` entries and returns a tuple
- ``(path, hash, size)`` for each line.
-
- :returns: iterator of (path, hash, size)
- """
- for result in self._get_records():
- yield result
-
- def write_installed_files(self, paths, prefix, dry_run=False):
- """
- Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
- existing ``RECORD`` file is silently overwritten.
-
- prefix is used to determine when to write absolute paths.
- """
- prefix = os.path.join(prefix, '')
- base = os.path.dirname(self.path)
- base_under_prefix = base.startswith(prefix)
- base = os.path.join(base, '')
- record_path = self.get_distinfo_file('RECORD')
- logger.info('creating %s', record_path)
- if dry_run:
- return None
- with CSVWriter(record_path) as writer:
- for path in paths:
- if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
- # do not put size and hash, as in PEP-376
- hash_value = size = ''
- else:
- size = '%d' % os.path.getsize(path)
- with open(path, 'rb') as fp:
- hash_value = self.get_hash(fp.read())
- if path.startswith(base) or (base_under_prefix and
- path.startswith(prefix)):
- path = os.path.relpath(path, base)
- writer.writerow((path, hash_value, size))
-
- # add the RECORD file itself
- if record_path.startswith(base):
- record_path = os.path.relpath(record_path, base)
- writer.writerow((record_path, '', ''))
- return record_path
-
- def check_installed_files(self):
- """
- Checks that the hashes and sizes of the files in ``RECORD`` are
- matched by the files themselves. Returns a (possibly empty) list of
- mismatches. Each entry in the mismatch list will be a tuple consisting
- of the path, 'exists', 'size' or 'hash' according to what didn't match
- (existence is checked first, then size, then hash), the expected
- value and the actual value.
- """
- mismatches = []
- base = os.path.dirname(self.path)
- record_path = self.get_distinfo_file('RECORD')
- for path, hash_value, size in self.list_installed_files():
- if not os.path.isabs(path):
- path = os.path.join(base, path)
- if path == record_path:
- continue
- if not os.path.exists(path):
- mismatches.append((path, 'exists', True, False))
- elif os.path.isfile(path):
- actual_size = str(os.path.getsize(path))
- if size and actual_size != size:
- mismatches.append((path, 'size', size, actual_size))
- elif hash_value:
- if '=' in hash_value:
- hasher = hash_value.split('=', 1)[0]
- else:
- hasher = None
-
- with open(path, 'rb') as f:
- actual_hash = self.get_hash(f.read(), hasher)
- if actual_hash != hash_value:
- mismatches.append((path, 'hash', hash_value, actual_hash))
- return mismatches
-
- @cached_property
- def shared_locations(self):
- """
- A dictionary of shared locations whose keys are in the set 'prefix',
- 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
- The corresponding value is the absolute path of that category for
- this distribution, and takes into account any paths selected by the
- user at installation time (e.g. via command-line arguments). In the
- case of the 'namespace' key, this would be a list of absolute paths
- for the roots of namespace packages in this distribution.
-
- The first time this property is accessed, the relevant information is
- read from the SHARED file in the .dist-info directory.
- """
- result = {}
- shared_path = os.path.join(self.path, 'SHARED')
- if os.path.isfile(shared_path):
- with codecs.open(shared_path, 'r', encoding='utf-8') as f:
- lines = f.read().splitlines()
- for line in lines:
- key, value = line.split('=', 1)
- if key == 'namespace':
- result.setdefault(key, []).append(value)
- else:
- result[key] = value
- return result
-
- def write_shared_locations(self, paths, dry_run=False):
- """
- Write shared location information to the SHARED file in .dist-info.
- :param paths: A dictionary as described in the documentation for
- :meth:`shared_locations`.
- :param dry_run: If True, the action is logged but no file is actually
- written.
- :return: The path of the file written to.
- """
- shared_path = os.path.join(self.path, 'SHARED')
- logger.info('creating %s', shared_path)
- if dry_run:
- return None
- lines = []
- for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
- path = paths[key]
- if os.path.isdir(path):
- lines.append('%s=%s' % (key, path))
- for ns in paths.get('namespace', ()):
- lines.append('namespace=%s' % ns)
-
- with codecs.open(shared_path, 'w', encoding='utf-8') as f:
- f.write('\n'.join(lines))
- return shared_path
-
- def get_distinfo_resource(self, path):
- if path not in DIST_FILES:
- raise DistlibException('invalid path for a dist-info file: '
- '%r at %r' % (path, self.path))
- finder = resources.finder_for_path(self.path)
- if finder is None:
- raise DistlibException('Unable to get a finder for %s' % self.path)
- return finder.find(path)
-
- def get_distinfo_file(self, path):
- """
- Returns a path located under the ``.dist-info`` directory. Returns a
- string representing the path.
-
- :parameter path: a ``'/'``-separated path relative to the
- ``.dist-info`` directory or an absolute path;
- If *path* is an absolute path and doesn't start
- with the ``.dist-info`` directory path,
- a :class:`DistlibException` is raised
- :type path: str
- :rtype: str
- """
- # Check whether the path contains separators # XXX use relpath, add tests
- if path.find(os.sep) >= 0:
- # treat it as a full path: keep only the last two components
- distinfo_dirname, path = path.split(os.sep)[-2:]
- if distinfo_dirname != self.path.split(os.sep)[-1]:
- raise DistlibException(
- 'dist-info file %r does not belong to the %r %s '
- 'distribution' % (path, self.name, self.version))
-
- # The file must be relative
- if path not in DIST_FILES:
- raise DistlibException('invalid path for a dist-info file: '
- '%r at %r' % (path, self.path))
-
- return os.path.join(self.path, path)
-
- def list_distinfo_files(self):
- """
- Iterates over the ``RECORD`` entries and returns paths for each line if
- the path is pointing to a file located in the ``.dist-info`` directory
- or one of its subdirectories.
-
- :returns: iterator of paths
- """
- base = os.path.dirname(self.path)
- for path, checksum, size in self._get_records():
- # XXX add separator or use real relpath algo
- if not os.path.isabs(path):
- path = os.path.join(base, path)
- if path.startswith(self.path):
- yield path
-
- def __eq__(self, other):
- return (isinstance(other, InstalledDistribution) and
- self.path == other.path)
-
- # See http://docs.python.org/reference/datamodel#object.__hash__
- __hash__ = object.__hash__
-
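-# Editor's sketch (not in the original source): verifying an installed
-# distribution against its RECORD data; dist_path is a DistributionPath
-# as sketched earlier, and the names are illustrative.
-#
-# dist = dist_path.get_distribution('pip')
-# for path, file_hash, size in dist.list_installed_files():
-#     print(path, file_hash, size)
-# mismatches = dist.check_installed_files()
-# if mismatches:
-#     print('problems found:', mismatches)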
-
-class EggInfoDistribution(BaseInstalledDistribution):
- """Created with the *path* of the ``.egg-info`` directory or file provided
- to the constructor. It reads the metadata contained in the file itself, or
- if the given path happens to be a directory, the metadata is read from the
- file ``PKG-INFO`` under that directory."""
-
- requested = True # as we have no way of knowing, assume it was
- shared_locations = {}
-
- def __init__(self, path, env=None):
- def set_name_and_version(s, n, v):
- s.name = n
- s.key = n.lower() # for case-insensitive comparisons
- s.version = v
-
- self.path = path
- self.dist_path = env
- if env and env._cache_enabled and path in env._cache_egg.path:
- metadata = env._cache_egg.path[path].metadata
- set_name_and_version(self, metadata.name, metadata.version)
- else:
- metadata = self._get_metadata(path)
-
- # Need to be set before caching
- set_name_and_version(self, metadata.name, metadata.version)
-
- if env and env._cache_enabled:
- env._cache_egg.add(self)
- super(EggInfoDistribution, self).__init__(metadata, path, env)
-
- def _get_metadata(self, path):
- requires = None
-
- def parse_requires_data(data):
- """Create a list of dependencies from a requires.txt file.
-
- *data*: the contents of a setuptools-produced requires.txt file.
- """
- reqs = []
- lines = data.splitlines()
- for line in lines:
- line = line.strip()
- if line.startswith('['):
- logger.warning('Unexpected line: quitting requirement scan: %r',
- line)
- break
- r = parse_requirement(line)
- if not r:
- logger.warning('Not recognised as a requirement: %r', line)
- continue
- if r.extras:
- logger.warning('extra requirements in requires.txt are '
- 'not supported')
- if not r.constraints:
- reqs.append(r.name)
- else:
- cons = ', '.join('%s%s' % c for c in r.constraints)
- reqs.append('%s (%s)' % (r.name, cons))
- return reqs
-
- def parse_requires_path(req_path):
- """Create a list of dependencies from a requires.txt file.
-
- *req_path*: the path to a setuptools-produced requires.txt file.
- """
-
- reqs = []
- try:
- with codecs.open(req_path, 'r', 'utf-8') as fp:
- reqs = parse_requires_data(fp.read())
- except IOError:
- pass
- return reqs
-
- if path.endswith('.egg'):
- if os.path.isdir(path):
- meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
- metadata = Metadata(path=meta_path, scheme='legacy')
- req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
- requires = parse_requires_path(req_path)
- else:
- # FIXME handle the case where zipfile is not available
- zipf = zipimport.zipimporter(path)
- fileobj = StringIO(
- zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
- metadata = Metadata(fileobj=fileobj, scheme='legacy')
- try:
- data = zipf.get_data('EGG-INFO/requires.txt')
- requires = parse_requires_data(data.decode('utf-8'))
- except IOError:
- requires = None
- elif path.endswith('.egg-info'):
- if os.path.isdir(path):
- req_path = os.path.join(path, 'requires.txt')
- requires = parse_requires_path(req_path)
- path = os.path.join(path, 'PKG-INFO')
- metadata = Metadata(path=path, scheme='legacy')
- else:
- raise DistlibException('path must end with .egg-info or .egg, '
- 'got %r' % path)
-
- if requires:
- metadata.add_requirements(requires)
- return metadata
-
- def __repr__(self):
- return '<EggInfoDistribution %r %s at %r>' % (
- self.name, self.version, self.path)
-
- def __str__(self):
- return "%s %s" % (self.name, self.version)
-
- def check_installed_files(self):
- """
- Checks that the hashes and sizes of the files in ``RECORD`` are
- matched by the files themselves. Returns a (possibly empty) list of
- mismatches. Each entry in the mismatch list will be a tuple consisting
- of the path, 'exists', 'size' or 'hash' according to what didn't match
- (existence is checked first, then size, then hash), the expected
- value and the actual value.
- """
- mismatches = []
- record_path = os.path.join(self.path, 'installed-files.txt')
- if os.path.exists(record_path):
- for path, _, _ in self.list_installed_files():
- if path == record_path:
- continue
- if not os.path.exists(path):
- mismatches.append((path, 'exists', True, False))
- return mismatches
-
- def list_installed_files(self):
- """
- Iterates over the ``installed-files.txt`` entries and returns a tuple
- ``(path, hash, size)`` for each line.
-
- :returns: a list of (path, hash, size)
- """
-
- def _md5(path):
- f = open(path, 'rb')
- try:
- content = f.read()
- finally:
- f.close()
- return hashlib.md5(content).hexdigest()
-
- def _size(path):
- return os.stat(path).st_size
-
- record_path = os.path.join(self.path, 'installed-files.txt')
- result = []
- if os.path.exists(record_path):
- with codecs.open(record_path, 'r', encoding='utf-8') as f:
- for line in f:
- line = line.strip()
- p = os.path.normpath(os.path.join(self.path, line))
- # "./" is present as a marker between installed files
- # and installation metadata files
- if not os.path.exists(p):
- logger.warning('Non-existent file: %s', p)
- if p.endswith(('.pyc', '.pyo')):
- continue
- #otherwise fall through and fail
- if not os.path.isdir(p):
- result.append((p, _md5(p), _size(p)))
- result.append((record_path, None, None))
- return result
-
- def list_distinfo_files(self, absolute=False):
- """
- Iterates over the ``installed-files.txt`` entries and returns paths for
- each line if the path is pointing to a file located in the
- ``.egg-info`` directory or one of its subdirectories.
-
- :parameter absolute: If *absolute* is ``True``, each returned path is
- transformed into a local absolute path. Otherwise the
- raw value from ``installed-files.txt`` is returned.
- :type absolute: boolean
- :returns: iterator of paths
- """
- record_path = os.path.join(self.path, 'installed-files.txt')
- skip = True
- with codecs.open(record_path, 'r', encoding='utf-8') as f:
- for line in f:
- line = line.strip()
- if line == './':
- skip = False
- continue
- if not skip:
- p = os.path.normpath(os.path.join(self.path, line))
- if p.startswith(self.path):
- if absolute:
- yield p
- else:
- yield line
-
- def __eq__(self, other):
- return (isinstance(other, EggInfoDistribution) and
- self.path == other.path)
-
- # See http://docs.python.org/reference/datamodel#object.__hash__
- __hash__ = object.__hash__
-
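-# Editor's note (not in the original source): parse_requires_data above
-# converts setuptools requires.txt entries into PEP-345-style strings,
-# so a line such as 'chardet>=2.0' becomes roughly 'chardet (>=2.0)',
-# and scanning stops at the first '[extra]' section header.
-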
-new_dist_class = InstalledDistribution
-old_dist_class = EggInfoDistribution
-
-
-class DependencyGraph(object):
- """
- Represents a dependency graph between distributions.
-
- The dependency relationships are stored in an ``adjacency_list`` that maps
- distributions to a list of ``(other, label)`` tuples where ``other``
- is a distribution and the edge is labeled with ``label`` (i.e. the version
- specifier, if such was provided). Also, for more efficient traversal, for
- every distribution ``x``, a list of predecessors is kept in
- ``reverse_list[x]``. An edge from distribution ``a`` to
- distribution ``b`` means that ``a`` depends on ``b``. If any missing
- dependencies are found, they are stored in ``missing``, which is a
- dictionary that maps distributions to a list of requirements that were not
- provided by any other distributions.
- """
-
- def __init__(self):
- self.adjacency_list = {}
- self.reverse_list = {}
- self.missing = {}
-
- def add_distribution(self, distribution):
- """Add the *distribution* to the graph.
-
- :type distribution: :class:`distutils2.database.InstalledDistribution`
- or :class:`distutils2.database.EggInfoDistribution`
- """
- self.adjacency_list[distribution] = []
- self.reverse_list[distribution] = []
- #self.missing[distribution] = []
-
- def add_edge(self, x, y, label=None):
- """Add an edge from distribution *x* to distribution *y* with the given
- *label*.
-
- :type x: :class:`distutils2.database.InstalledDistribution` or
- :class:`distutils2.database.EggInfoDistribution`
- :type y: :class:`distutils2.database.InstalledDistribution` or
- :class:`distutils2.database.EggInfoDistribution`
- :type label: ``str`` or ``None``
- """
- self.adjacency_list[x].append((y, label))
- # multiple edges are allowed, so be careful
- if x not in self.reverse_list[y]:
- self.reverse_list[y].append(x)
-
- def add_missing(self, distribution, requirement):
- """
- Add a missing *requirement* for the given *distribution*.
-
- :type distribution: :class:`distutils2.database.InstalledDistribution`
- or :class:`distutils2.database.EggInfoDistribution`
- :type requirement: ``str``
- """
- logger.debug('%s missing %r', distribution, requirement)
- self.missing.setdefault(distribution, []).append(requirement)
-
- def _repr_dist(self, dist):
- return '%s %s' % (dist.name, dist.version)
-
- def repr_node(self, dist, level=1):
- """Prints only a subgraph"""
- output = [self._repr_dist(dist)]
- for other, label in self.adjacency_list[dist]:
- dist = self._repr_dist(other)
- if label is not None:
- dist = '%s [%s]' % (dist, label)
- output.append(' ' * level + str(dist))
- suboutput = self.repr_node(other, level + 1)
- subs = suboutput.split('\n')
- output.extend(subs[1:])
- return '\n'.join(output)
-
- def to_dot(self, f, skip_disconnected=True):
- """Writes a DOT output for the graph to the provided file *f*.
-
- If *skip_disconnected* is set to ``True``, then all distributions
- that are not dependent on any other distribution are skipped.
-
- :type f: has to support ``file``-like operations
- :type skip_disconnected: ``bool``
- """
- disconnected = []
-
- f.write("digraph dependencies {\n")
- for dist, adjs in self.adjacency_list.items():
- if len(adjs) == 0 and not skip_disconnected:
- disconnected.append(dist)
- for other, label in adjs:
- if not label is None:
- f.write('"%s" -> "%s" [label="%s"]\n' %
- (dist.name, other.name, label))
- else:
- f.write('"%s" -> "%s"\n' % (dist.name, other.name))
- if not skip_disconnected and len(disconnected) > 0:
- f.write('subgraph disconnected {\n')
- f.write('label = "Disconnected"\n')
- f.write('bgcolor = red\n')
-
- for dist in disconnected:
- f.write('"%s"' % dist.name)
- f.write('\n')
- f.write('}\n')
- f.write('}\n')
-
- def topological_sort(self):
- """
- Perform a topological sort of the graph.
- :return: A tuple, the first element of which is a topologically sorted
- list of distributions, and the second element of which is a
- list of distributions that cannot be sorted because they have
- circular dependencies and so form a cycle.
- """
- result = []
- # Make a shallow copy of the adjacency list
- alist = {}
- for k, v in self.adjacency_list.items():
- alist[k] = v[:]
- while True:
- # See what we can remove in this run
- to_remove = []
- for k, v in list(alist.items()):
- if not v:
- to_remove.append(k)
- del alist[k]
- if not to_remove:
- # What's left in alist (if anything) is a cycle.
- break
- # Remove from the adjacency list of others
- for k, v in alist.items():
- alist[k] = [(d, r) for d, r in v if d not in to_remove]
- logger.debug('Moving to result: %s',
- ['%s (%s)' % (d.name, d.version) for d in to_remove])
- result.extend(to_remove)
- return result, list(alist.keys())
-
- def __repr__(self):
- """Representation of the graph"""
- output = []
- for dist, adjs in self.adjacency_list.items():
- output.append(self.repr_node(dist))
- return '\n'.join(output)
-
-
-def make_graph(dists, scheme='default'):
- """Makes a dependency graph from the given distributions.
-
- :parameter dists: a list of distributions
- :type dists: list of :class:`distutils2.database.InstalledDistribution` and
- :class:`distutils2.database.EggInfoDistribution` instances
- :rtype: a :class:`DependencyGraph` instance
- """
- scheme = get_scheme(scheme)
- graph = DependencyGraph()
- provided = {} # maps names to lists of (version, dist) tuples
-
- # first, build the graph and find out what's provided
- for dist in dists:
- graph.add_distribution(dist)
-
- for p in dist.provides:
- name, version = parse_name_and_version(p)
- logger.debug('Add to provided: %s, %s, %s', name, version, dist)
- provided.setdefault(name, []).append((version, dist))
-
- # now make the edges
- for dist in dists:
- requires = (dist.run_requires | dist.meta_requires |
- dist.build_requires | dist.dev_requires)
- for req in requires:
- try:
- matcher = scheme.matcher(req)
- except UnsupportedVersionError:
- # XXX compat-mode if cannot read the version
- logger.warning('could not read version %r - using name only',
- req)
- name = req.split()[0]
- matcher = scheme.matcher(name)
-
- name = matcher.key # case-insensitive
-
- matched = False
- if name in provided:
- for version, provider in provided[name]:
- try:
- match = matcher.match(version)
- except UnsupportedVersionError:
- match = False
-
- if match:
- graph.add_edge(dist, provider, req)
- matched = True
- break
- if not matched:
- graph.add_missing(dist, req)
- return graph
-
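-# Editor's sketch (not in the original source): building a graph over the
-# installed set and topologically sorting it; names are illustrative.
-#
-# dists = list(DistributionPath().get_distributions())
-# graph = make_graph(dists)
-# ordered, cyclic = graph.topological_sort()
-# # 'ordered' lists dependencies before their dependents; 'cyclic' holds
-# # any distributions caught in circular dependencies.
-# for dist, reqs in graph.missing.items():
-#     print('%s is missing %s' % (dist, ', '.join(reqs)))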
-
-def get_dependent_dists(dists, dist):
- """Recursively generate a list of distributions from *dists* that are
- dependent on *dist*.
-
- :param dists: a list of distributions
- :param dist: a distribution, member of *dists* for which we are interested
- """
- if dist not in dists:
- raise DistlibException('given distribution %r is not a member '
- 'of the list' % dist.name)
- graph = make_graph(dists)
-
- dep = [dist] # dependent distributions
- todo = graph.reverse_list[dist] # list of nodes we should inspect
-
- while todo:
- d = todo.pop()
- dep.append(d)
- for succ in graph.reverse_list[d]:
- if succ not in dep:
- todo.append(succ)
-
- dep.pop(0) # remove dist from dep, was there to prevent infinite loops
- return dep
-
-
-def get_required_dists(dists, dist):
- """Recursively generate a list of distributions from *dists* that are
- required by *dist*.
-
- :param dists: a list of distributions
- :param dist: a distribution, member of *dists* for which we are interested
- """
- if dist not in dists:
- raise DistlibException('given distribution %r is not a member '
- 'of the list' % dist.name)
- graph = make_graph(dists)
-
- req = [] # required distributions
- todo = graph.adjacency_list[dist] # list of nodes we should inspect
-
- while todo:
- d = todo.pop()[0]
- req.append(d)
- for pred in graph.adjacency_list[d]:
- if pred not in req:
- todo.append(pred)
-
- return req
-
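-# Editor's sketch (not in the original source): querying the two helpers
-# above for one member of the installed set; names are illustrative.
-#
-# dists = list(DistributionPath().get_distributions())
-# target = next(d for d in dists if d.key == 'requests')
-# needs = get_required_dists(dists, target)      # what target depends on
-# needed_by = get_dependent_dists(dists, target) # what depends on target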
-
-def make_dist(name, version, **kwargs):
- """
- A convenience method for making a dist given just a name and version.
- """
- summary = kwargs.pop('summary', 'Placeholder for summary')
- md = Metadata(**kwargs)
- md.name = name
- md.version = version
- md.summary = summary or 'Placeholder for summary'
- return Distribution(md)
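-# Editor's note (not in the original source): make_dist is convenient for
-# synthesising placeholder distributions, e.g. in tests.
-#
-# dist = make_dist('dummy', '0.1', summary='A test distribution')
-# dist.name_and_version  # 'dummy (0.1)'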
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/index.py b/env/Lib/site-packages/pip/_vendor/distlib/index.py
deleted file mode 100644
index 6803dd2..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/index.py
+++ /dev/null
@@ -1,515 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2013 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-import hashlib
-import logging
-import os
-import shutil
-import subprocess
-import tempfile
-try:
- from threading import Thread
-except ImportError:
- from dummy_threading import Thread
-
-from . import DistlibException
-from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
- urlparse, build_opener, string_types)
-from .util import cached_property, zip_dir, ServerProxy
-
-logger = logging.getLogger(__name__)
-
-DEFAULT_INDEX = 'https://pypi.python.org/pypi'
-DEFAULT_REALM = 'pypi'
-
-class PackageIndex(object):
- """
- This class represents a package index compatible with PyPI, the Python
- Package Index.
- """
-
- boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
-
- def __init__(self, url=None):
- """
- Initialise an instance.
-
- :param url: The URL of the index. If not specified, the URL for PyPI is
- used.
- """
- self.url = url or DEFAULT_INDEX
- self.read_configuration()
- scheme, netloc, path, params, query, frag = urlparse(self.url)
- if params or query or frag or scheme not in ('http', 'https'):
- raise DistlibException('invalid repository: %s' % self.url)
- self.password_handler = None
- self.ssl_verifier = None
- self.gpg = None
- self.gpg_home = None
- self.rpc_proxy = None
- with open(os.devnull, 'w') as sink:
- # Use gpg by default rather than gpg2, as gpg2 insists on
- # prompting for passwords
- for s in ('gpg', 'gpg2'):
- try:
- rc = subprocess.check_call([s, '--version'], stdout=sink,
- stderr=sink)
- if rc == 0:
- self.gpg = s
- break
- except (OSError, subprocess.CalledProcessError):
- pass
-
- def _get_pypirc_command(self):
- """
- Get the distutils command for interacting with PyPI configurations.
- :return: the command.
- """
- from distutils.core import Distribution
- from distutils.config import PyPIRCCommand
- d = Distribution()
- return PyPIRCCommand(d)
-
- def read_configuration(self):
- """
- Read the PyPI access configuration as supported by distutils, getting
- PyPI to do the actual work. This populates ``username``, ``password``,
- ``realm`` and ``url`` attributes from the configuration.
- """
- # get distutils to do the work
- c = self._get_pypirc_command()
- c.repository = self.url
- cfg = c._read_pypirc()
- self.username = cfg.get('username')
- self.password = cfg.get('password')
- self.realm = cfg.get('realm', 'pypi')
- self.url = cfg.get('repository', self.url)
-
- def save_configuration(self):
- """
- Save the PyPI access configuration. You must have set ``username`` and
- ``password`` attributes before calling this method.
-
- Again, distutils is used to do the actual work.
- """
- self.check_credentials()
- # get distutils to do the work
- c = self._get_pypirc_command()
- c._store_pypirc(self.username, self.password)
-
- def check_credentials(self):
- """
- Check that ``username`` and ``password`` have been set, and raise an
- exception if not.
- """
- if self.username is None or self.password is None:
- raise DistlibException('username and password must be set')
- pm = HTTPPasswordMgr()
- _, netloc, _, _, _, _ = urlparse(self.url)
- pm.add_password(self.realm, netloc, self.username, self.password)
- self.password_handler = HTTPBasicAuthHandler(pm)
-
- def register(self, metadata):
- """
- Register a distribution on PyPI, using the provided metadata.
-
- :param metadata: A :class:`Metadata` instance defining at least a name
- and version number for the distribution to be
- registered.
- :return: The HTTP response received from PyPI upon submission of the
- request.
- """
- self.check_credentials()
- metadata.validate()
- d = metadata.todict()
- d[':action'] = 'verify'
- request = self.encode_request(d.items(), [])
- response = self.send_request(request)
- d[':action'] = 'submit'
- request = self.encode_request(d.items(), [])
- return self.send_request(request)
-
- def _reader(self, name, stream, outbuf):
- """
- Thread runner for reading lines from a subprocess into a buffer.
-
- :param name: The logical name of the stream (used for logging only).
- :param stream: The stream to read from. This will typically be a pipe
- connected to the output stream of a subprocess.
- :param outbuf: The list to append the read lines to.
- """
- while True:
- s = stream.readline()
- if not s:
- break
- s = s.decode('utf-8').rstrip()
- outbuf.append(s)
- logger.debug('%s: %s', name, s)
- stream.close()
-
- def get_sign_command(self, filename, signer, sign_password,
- keystore=None):
- """
- Return a suitable command for signing a file.
-
- :param filename: The pathname to the file to be signed.
- :param signer: The identifier of the signer of the file.
- :param sign_password: The passphrase for the signer's
- private key used for signing.
- :param keystore: The path to a directory which contains the keys
- used in verification. If not specified, the
- instance's ``gpg_home`` attribute is used instead.
- :return: The signing command as a list suitable to be
- passed to :class:`subprocess.Popen`.
- """
- cmd = [self.gpg, '--status-fd', '2', '--no-tty']
- if keystore is None:
- keystore = self.gpg_home
- if keystore:
- cmd.extend(['--homedir', keystore])
- if sign_password is not None:
- cmd.extend(['--batch', '--passphrase-fd', '0'])
- td = tempfile.mkdtemp()
- sf = os.path.join(td, os.path.basename(filename) + '.asc')
- cmd.extend(['--detach-sign', '--armor', '--local-user',
- signer, '--output', sf, filename])
- logger.debug('invoking: %s', ' '.join(cmd))
- return cmd, sf
-
- def run_command(self, cmd, input_data=None):
- """
- Run a command in a child process, passing it any input data specified.
-
- :param cmd: The command to run.
- :param input_data: If specified, this must be a byte string containing
- data to be sent to the child process.
- :return: A tuple consisting of the subprocess' exit code, a list of
- lines read from the subprocess' ``stdout``, and a list of
- lines read from the subprocess' ``stderr``.
- """
- kwargs = {
- 'stdout': subprocess.PIPE,
- 'stderr': subprocess.PIPE,
- }
- if input_data is not None:
- kwargs['stdin'] = subprocess.PIPE
- stdout = []
- stderr = []
- p = subprocess.Popen(cmd, **kwargs)
- # We don't use communicate() here because we may need to
- # get clever with interacting with the command
- t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
- t1.start()
- t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
- t2.start()
- if input_data is not None:
- p.stdin.write(input_data)
- p.stdin.close()
-
- p.wait()
- t1.join()
- t2.join()
- return p.returncode, stdout, stderr
-
- def sign_file(self, filename, signer, sign_password, keystore=None):
- """
- Sign a file.
-
- :param filename: The pathname to the file to be signed.
- :param signer: The identifier of the signer of the file.
- :param sign_password: The passphrase for the signer's
- private key used for signing.
- :param keystore: The path to a directory which contains the keys
- used in signing. If not specified, the instance's
- ``gpg_home`` attribute is used instead.
- :return: The absolute pathname of the file where the signature is
- stored.
- """
- cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
- keystore)
- rc, stdout, stderr = self.run_command(cmd,
- sign_password.encode('utf-8'))
- if rc != 0:
- raise DistlibException('sign command failed with error '
- 'code %s' % rc)
- return sig_file
-
- def upload_file(self, metadata, filename, signer=None, sign_password=None,
- filetype='sdist', pyversion='source', keystore=None):
- """
- Upload a release file to the index.
-
- :param metadata: A :class:`Metadata` instance defining at least a name
- and version number for the file to be uploaded.
- :param filename: The pathname of the file to be uploaded.
- :param signer: The identifier of the signer of the file.
- :param sign_password: The passphrase for the signer's
- private key used for signing.
- :param filetype: The type of the file being uploaded. This is the
- distutils command which produced that file, e.g.
- ``sdist`` or ``bdist_wheel``.
- :param pyversion: The version of Python which the release relates
- to. For code compatible with any Python, this would
- be ``source``, otherwise it would be e.g. ``3.2``.
- :param keystore: The path to a directory which contains the keys
- used in signing. If not specified, the instance's
- ``gpg_home`` attribute is used instead.
- :return: The HTTP response received from PyPI upon submission of the
- request.
- """
- self.check_credentials()
- if not os.path.exists(filename):
- raise DistlibException('not found: %s' % filename)
- metadata.validate()
- d = metadata.todict()
- sig_file = None
- if signer:
- if not self.gpg:
- logger.warning('no signing program available - not signed')
- else:
- sig_file = self.sign_file(filename, signer, sign_password,
- keystore)
- with open(filename, 'rb') as f:
- file_data = f.read()
- md5_digest = hashlib.md5(file_data).hexdigest()
- sha256_digest = hashlib.sha256(file_data).hexdigest()
- d.update({
- ':action': 'file_upload',
- 'protocol_version': '1',
- 'filetype': filetype,
- 'pyversion': pyversion,
- 'md5_digest': md5_digest,
- 'sha256_digest': sha256_digest,
- })
- files = [('content', os.path.basename(filename), file_data)]
- if sig_file:
- with open(sig_file, 'rb') as f:
- sig_data = f.read()
- files.append(('gpg_signature', os.path.basename(sig_file),
- sig_data))
- shutil.rmtree(os.path.dirname(sig_file))
- request = self.encode_request(d.items(), files)
- return self.send_request(request)
-
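- # Editor's sketch (not in the original source): a typical flow for
- # upload_file above. Credentials come from ~/.pypirc via
- # read_configuration(); metadata is a distlib Metadata instance and
- # the filename is illustrative.
- #
- # index = PackageIndex()
- # index.username = 'user'
- # index.password = 'secret'
- # response = index.upload_file(metadata, 'dist/pkg-1.0.tar.gz',
- #                              filetype='sdist', pyversion='source')
-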
- def upload_documentation(self, metadata, doc_dir):
- """
- Upload documentation to the index.
-
- :param metadata: A :class:`Metadata` instance defining at least a name
- and version number for the documentation to be
- uploaded.
- :param doc_dir: The pathname of the directory which contains the
- documentation. This should be the directory that
- contains the ``index.html`` for the documentation.
- :return: The HTTP response received from PyPI upon submission of the
- request.
- """
- self.check_credentials()
- if not os.path.isdir(doc_dir):
- raise DistlibException('not a directory: %r' % doc_dir)
- fn = os.path.join(doc_dir, 'index.html')
- if not os.path.exists(fn):
- raise DistlibException('not found: %r' % fn)
- metadata.validate()
- name, version = metadata.name, metadata.version
- zip_data = zip_dir(doc_dir).getvalue()
- fields = [(':action', 'doc_upload'),
- ('name', name), ('version', version)]
- files = [('content', name, zip_data)]
- request = self.encode_request(fields, files)
- return self.send_request(request)
-
- def get_verify_command(self, signature_filename, data_filename,
- keystore=None):
- """
- Return a suitable command for verifying a file.
-
- :param signature_filename: The pathname to the file containing the
- signature.
- :param data_filename: The pathname to the file containing the
- signed data.
- :param keystore: The path to a directory which contains the keys
- used in verification. If not specified, the
- instance's ``gpg_home`` attribute is used instead.
- :return: The verifying command as a list suitable to be
- passed to :class:`subprocess.Popen`.
- """
- cmd = [self.gpg, '--status-fd', '2', '--no-tty']
- if keystore is None:
- keystore = self.gpg_home
- if keystore:
- cmd.extend(['--homedir', keystore])
- cmd.extend(['--verify', signature_filename, data_filename])
- logger.debug('invoking: %s', ' '.join(cmd))
- return cmd
-
- def verify_signature(self, signature_filename, data_filename,
- keystore=None):
- """
- Verify a signature for a file.
-
- :param signature_filename: The pathname to the file containing the
- signature.
- :param data_filename: The pathname to the file containing the
- signed data.
- :param keystore: The path to a directory which contains the keys
- used in verification. If not specified, the
- instance's ``gpg_home`` attribute is used instead.
- :return: True if the signature was verified, else False.
- """
- if not self.gpg:
- raise DistlibException('verification unavailable because gpg '
- 'unavailable')
- cmd = self.get_verify_command(signature_filename, data_filename,
- keystore)
- rc, stdout, stderr = self.run_command(cmd)
- if rc not in (0, 1):
- raise DistlibException('verify command failed with error '
- 'code %s' % rc)
- return rc == 0
-
- def download_file(self, url, destfile, digest=None, reporthook=None):
- """
- This is a convenience method for downloading a file from a URL.
- Normally, this will be a file from the index, though currently
- no check is made for this (i.e. a file can be downloaded from
- anywhere).
-
- The method is just like the :func:`urlretrieve` function in the
- standard library, except that it allows digest computation to be
- done during download, and checks that the downloaded data
- matches any expected value.
-
- :param url: The URL of the file to be downloaded (assumed to be
- available via an HTTP GET request).
- :param destfile: The pathname where the downloaded file is to be
- saved.
- :param digest: If specified, this must be a (hasher, value)
- tuple, where hasher is the algorithm used (e.g.
- ``'md5'``) and ``value`` is the expected value.
- :param reporthook: The same as for :func:`urlretrieve` in the
- standard library.
- """
- if digest is None:
- digester = None
- logger.debug('No digest specified')
- else:
- if isinstance(digest, (list, tuple)):
- hasher, digest = digest
- else:
- hasher = 'md5'
- digester = getattr(hashlib, hasher)()
- logger.debug('Digest specified: %s', digest)
- # The following code is equivalent to urlretrieve.
- # We need to do it this way so that we can compute the
- # digest of the file as we go.
- with open(destfile, 'wb') as dfp:
- # addinfourl is not a context manager on 2.x
- # so we have to use try/finally
- sfp = self.send_request(Request(url))
- try:
- headers = sfp.info()
- blocksize = 8192
- size = -1
- read = 0
- blocknum = 0
- if "content-length" in headers:
- size = int(headers["Content-Length"])
- if reporthook:
- reporthook(blocknum, blocksize, size)
- while True:
- block = sfp.read(blocksize)
- if not block:
- break
- read += len(block)
- dfp.write(block)
- if digester:
- digester.update(block)
- blocknum += 1
- if reporthook:
- reporthook(blocknum, blocksize, size)
- finally:
- sfp.close()
-
- # check that we got the whole file, if we can
- if size >= 0 and read < size:
- raise DistlibException(
- 'retrieval incomplete: got only %d out of %d bytes'
- % (read, size))
- # if we have a digest, it must match.
- if digester:
- actual = digester.hexdigest()
- if digest != actual:
- raise DistlibException('%s digest mismatch for %s: expected '
- '%s, got %s' % (hasher, destfile,
- digest, actual))
- logger.debug('Digest verified: %s', digest)
-
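- # Editor's sketch (not in the original source): downloading with
- # digest checking via download_file above. A bare string digest is
- # treated as MD5; a (hasher, value) tuple selects the algorithm. The
- # URL and expected hash below are illustrative.
- #
- # index.download_file('https://example.com/pkg-1.0.tar.gz',
- #                     'pkg-1.0.tar.gz',
- #                     digest=('sha256', expected_sha256))
-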
- def send_request(self, req):
- """
- Send a standard library :class:`Request` to PyPI and return its
- response.
-
- :param req: The request to send.
- :return: The HTTP response from PyPI (a standard library HTTPResponse).
- """
- handlers = []
- if self.password_handler:
- handlers.append(self.password_handler)
- if self.ssl_verifier:
- handlers.append(self.ssl_verifier)
- opener = build_opener(*handlers)
- return opener.open(req)
-
- def encode_request(self, fields, files):
- """
- Encode fields and files for posting to an HTTP server.
-
- :param fields: The fields to send as a list of (fieldname, value)
- tuples.
- :param files: The files to send as a list of (fieldname, filename,
- file_bytes) tuples.
- """
- # Adapted from packaging, which in turn was adapted from
- # http://code.activestate.com/recipes/146306
-
- parts = []
- boundary = self.boundary
- for k, values in fields:
- if not isinstance(values, (list, tuple)):
- values = [values]
-
- for v in values:
- parts.extend((
- b'--' + boundary,
- ('Content-Disposition: form-data; name="%s"' %
- k).encode('utf-8'),
- b'',
- v.encode('utf-8')))
- for key, filename, value in files:
- parts.extend((
- b'--' + boundary,
- ('Content-Disposition: form-data; name="%s"; filename="%s"' %
- (key, filename)).encode('utf-8'),
- b'',
- value))
-
- parts.extend((b'--' + boundary + b'--', b''))
-
- body = b'\r\n'.join(parts)
- ct = b'multipart/form-data; boundary=' + boundary
- headers = {
- 'Content-type': ct,
- 'Content-length': str(len(body))
- }
- return Request(self.url, body, headers)
-
- def search(self, terms, operator=None):
- if isinstance(terms, string_types):
- terms = {'name': terms}
- if self.rpc_proxy is None:
- self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
- return self.rpc_proxy.search(terms, operator or 'and')
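- # Editor's sketch (not in the original source): searching the index
- # via its XML-RPC interface; terms may be a bare name or a field dict.
- #
- # index = PackageIndex()
- # hits = index.search('distlib')
- # hits = index.search({'name': 'distlib', 'summary': 'packaging'}, 'or')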
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/locators.py b/env/Lib/site-packages/pip/_vendor/distlib/locators.py
deleted file mode 100644
index 14789ef..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/locators.py
+++ /dev/null
@@ -1,1283 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012-2015 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-
-import gzip
-from io import BytesIO
-import json
-import logging
-import os
-import posixpath
-import re
-try:
- import threading
-except ImportError: # pragma: no cover
- import dummy_threading as threading
-import zlib
-
-from . import DistlibException
-from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
- queue, quote, unescape, string_types, build_opener,
- HTTPRedirectHandler as BaseRedirectHandler, text_type,
- Request, HTTPError, URLError)
-from .database import Distribution, DistributionPath, make_dist
-from .metadata import Metadata
-from .util import (cached_property, parse_credentials, ensure_slash,
- split_filename, get_project_data, parse_requirement,
- parse_name_and_version, ServerProxy, normalize_name)
-from .version import get_scheme, UnsupportedVersionError
-from .wheel import Wheel, is_compatible
-
-logger = logging.getLogger(__name__)
-
-HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
-CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
-HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
-DEFAULT_INDEX = 'https://pypi.python.org/pypi'
-
-def get_all_distribution_names(url=None):
- """
- Return all distribution names known by an index.
- :param url: The URL of the index.
- :return: A list of all known distribution names.
- """
- if url is None:
- url = DEFAULT_INDEX
- client = ServerProxy(url, timeout=3.0)
- return client.list_packages()
-
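-# Editor's sketch (not in the original source): listing every project an
-# index knows about; this XML-RPC call can be slow against a large index.
-#
-# names = get_all_distribution_names()
-# print('%d projects known' % len(names))
-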
-class RedirectHandler(BaseRedirectHandler):
- """
- A class to work around a bug in some Python 3.2.x releases.
- """
- # There's a bug in the base version for some 3.2.x
- # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
- # returns e.g. /abc, it bails because it says the scheme ''
- # is bogus, when actually it should use the request's
- # URL for the scheme. See Python issue #13696.
- def http_error_302(self, req, fp, code, msg, headers):
- # Some servers (incorrectly) return multiple Location headers
- # (so probably same goes for URI). Use first header.
- newurl = None
- for key in ('location', 'uri'):
- if key in headers:
- newurl = headers[key]
- break
- if newurl is None:
- return
- urlparts = urlparse(newurl)
- if urlparts.scheme == '':
- newurl = urljoin(req.get_full_url(), newurl)
- if hasattr(headers, 'replace_header'):
- headers.replace_header(key, newurl)
- else:
- headers[key] = newurl
- return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
- headers)
-
- http_error_301 = http_error_303 = http_error_307 = http_error_302
-
-class Locator(object):
- """
- A base class for locators - things that locate distributions.
- """
- source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
- binary_extensions = ('.egg', '.exe', '.whl')
- excluded_extensions = ('.pdf',)
-
- # A list of tags indicating which wheels you want to match. The default
- # value of None matches against the tags compatible with the running
- # Python. If you want to match other values, set wheel_tags on a locator
- # instance to a list of tuples (pyver, abi, arch) which you want to match.
- wheel_tags = None
-
- downloadable_extensions = source_extensions + ('.whl',)
-
- def __init__(self, scheme='default'):
- """
- Initialise an instance.
- :param scheme: Because locators look for most recent versions, they
- need to know the version scheme to use. This specifies
- the current PEP-recommended scheme - use ``'legacy'``
- if you need to support existing distributions on PyPI.
- """
- self._cache = {}
- self.scheme = scheme
- # Because of bugs in some of the handlers on some of the platforms,
- # we use our own opener rather than just using urlopen.
- self.opener = build_opener(RedirectHandler())
- # If get_project() is called from locate(), the matcher instance
- # is set from the requirement passed to locate(). See issue #18 for
- # why this can be useful to know.
- self.matcher = None
- self.errors = queue.Queue()
-
- def get_errors(self):
- """
- Return any errors which have occurred.
- """
- result = []
- while not self.errors.empty(): # pragma: no cover
- try:
- e = self.errors.get(False)
- result.append(e)
- except queue.Empty:
- continue
- self.errors.task_done()
- return result
-
- def clear_errors(self):
- """
- Clear any errors which may have been logged.
- """
- # Just get the errors and throw them away
- self.get_errors()
-
- def clear_cache(self):
- self._cache.clear()
-
- def _get_scheme(self):
- return self._scheme
-
- def _set_scheme(self, value):
- self._scheme = value
-
- scheme = property(_get_scheme, _set_scheme)
-
- def _get_project(self, name):
- """
- For a given project, get a dictionary mapping available versions to Distribution
- instances.
-
- This should be implemented in subclasses.
-
- If called from a locate() request, self.matcher will be set to a
- matcher for the requirement to satisfy, otherwise it will be None.
- """
- raise NotImplementedError('Please implement in the subclass')
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- raise NotImplementedError('Please implement in the subclass')
-
- def get_project(self, name):
- """
- For a given project, get a dictionary mapping available versions to Distribution
- instances.
-
- This calls _get_project to do all the work, and just implements a caching layer on top.
- """
- if self._cache is None:
- result = self._get_project(name)
- elif name in self._cache:
- result = self._cache[name]
- else:
- self.clear_errors()
- result = self._get_project(name)
- self._cache[name] = result
- return result
-
- def score_url(self, url):
- """
-        Give a URL a score which can be used to choose preferred URLs
- for a given project release.
- """
- t = urlparse(url)
- basename = posixpath.basename(t.path)
- compatible = True
- is_wheel = basename.endswith('.whl')
- if is_wheel:
- compatible = is_compatible(Wheel(basename), self.wheel_tags)
-        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
-                is_wheel, compatible, basename)
-
- def prefer_url(self, url1, url2):
- """
- Choose one of two URLs where both are candidates for distribution
- archives for the same version of a distribution (for example,
- .tar.gz vs. zip).
-
- The current implementation favours https:// URLs over http://, archives
- from PyPI over those from other locations, wheel compatibility (if a
- wheel) and then the archive name.
- """
- result = url2
- if url1:
- s1 = self.score_url(url1)
- s2 = self.score_url(url2)
- if s1 > s2:
- result = url1
- if result != url2:
- logger.debug('Not replacing %r with %r', url1, url2)
- else:
- logger.debug('Replacing %r with %r', url1, url2)
- return result
-
- def split_filename(self, filename, project_name):
- """
- Attempt to split a filename in project name, version and Python version.
- """
- return split_filename(filename, project_name)
-
- def convert_url_to_download_info(self, url, project_name):
- """
- See if a URL is a candidate for a download URL for a project (the URL
- has typically been scraped from an HTML page).
-
- If it is, a dictionary is returned with keys "name", "version",
- "filename" and "url"; otherwise, None is returned.
- """
- def same_project(name1, name2):
- return normalize_name(name1) == normalize_name(name2)
-
- result = None
- scheme, netloc, path, params, query, frag = urlparse(url)
- if frag.lower().startswith('egg='):
- logger.debug('%s: version hint in fragment: %r',
- project_name, frag)
- m = HASHER_HASH.match(frag)
- if m:
- algo, digest = m.groups()
- else:
- algo, digest = None, None
- origpath = path
- if path and path[-1] == '/':
- path = path[:-1]
- if path.endswith('.whl'):
- try:
- wheel = Wheel(path)
- if is_compatible(wheel, self.wheel_tags):
- if project_name is None:
- include = True
- else:
- include = same_project(wheel.name, project_name)
- if include:
- result = {
- 'name': wheel.name,
- 'version': wheel.version,
- 'filename': wheel.filename,
- 'url': urlunparse((scheme, netloc, origpath,
- params, query, '')),
- 'python-version': ', '.join(
- ['.'.join(list(v[2:])) for v in wheel.pyver]),
- }
- except Exception as e: # pragma: no cover
- logger.warning('invalid path for wheel: %s', path)
- elif path.endswith(self.downloadable_extensions):
- path = filename = posixpath.basename(path)
- for ext in self.downloadable_extensions:
- if path.endswith(ext):
- path = path[:-len(ext)]
- t = self.split_filename(path, project_name)
- if not t:
- logger.debug('No match for project/version: %s', path)
- else:
- name, version, pyver = t
- if not project_name or same_project(project_name, name):
- result = {
- 'name': name,
- 'version': version,
- 'filename': filename,
- 'url': urlunparse((scheme, netloc, origpath,
- params, query, '')),
- #'packagetype': 'sdist',
- }
- if pyver:
- result['python-version'] = pyver
- break
- if result and algo:
- result['%s_digest' % algo] = digest
- return result
-
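A hedged sketch of what convert_url_to_download_info() yields for a typical
sdist URL, using the DirectoryLocator subclass defined later in this module
(the host, filename and digest are made up; this assumes pip 9's vendored copy
imports as pip._vendor.distlib.locators):

    import tempfile
    from pip._vendor.distlib.locators import DirectoryLocator

    loc = DirectoryLocator(tempfile.gettempdir())  # any concrete Locator will do
    info = loc.convert_url_to_download_info(
        'https://example.com/dists/foo-1.0.tar.gz#sha256=0123abcd', 'foo')
    # Roughly: {'name': 'foo', 'version': '1.0',
    #           'filename': 'foo-1.0.tar.gz',
    #           'url': 'https://example.com/dists/foo-1.0.tar.gz',
    #           'sha256_digest': '0123abcd'}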
- def _get_digest(self, info):
- """
- Get a digest from a dictionary by looking at keys of the form
- 'algo_digest'.
-
- Returns a 2-tuple (algo, digest) if found, else None. Currently
- looks only for SHA256, then MD5.
- """
- result = None
- for algo in ('sha256', 'md5'):
- key = '%s_digest' % algo
- if key in info:
- result = (algo, info[key])
- break
- return result
-
- def _update_version_data(self, result, info):
- """
- Update a result dictionary (the final result from _get_project) with a
- dictionary for a specific version, which typically holds information
- gleaned from a filename or URL for an archive for the distribution.
- """
- name = info.pop('name')
- version = info.pop('version')
- if version in result:
- dist = result[version]
- md = dist.metadata
- else:
- dist = make_dist(name, version, scheme=self.scheme)
- md = dist.metadata
- dist.digest = digest = self._get_digest(info)
- url = info['url']
- result['digests'][url] = digest
- if md.source_url != info['url']:
- md.source_url = self.prefer_url(md.source_url, url)
- result['urls'].setdefault(version, set()).add(url)
- dist.locator = self
- result[version] = dist
-
- def locate(self, requirement, prereleases=False):
- """
- Find the most recent distribution which matches the given
- requirement.
-
- :param requirement: A requirement of the form 'foo (1.0)' or perhaps
- 'foo (>= 1.0, < 2.0, != 1.3)'
- :param prereleases: If ``True``, allow pre-release versions
- to be located. Otherwise, pre-release versions
- are not returned.
- :return: A :class:`Distribution` instance, or ``None`` if no such
- distribution could be located.
- """
- result = None
- r = parse_requirement(requirement)
- if r is None:
- raise DistlibException('Not a valid requirement: %r' % requirement)
- scheme = get_scheme(self.scheme)
- self.matcher = matcher = scheme.matcher(r.requirement)
- logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
- versions = self.get_project(r.name)
- if len(versions) > 2: # urls and digests keys are present
- # sometimes, versions are invalid
- slist = []
- vcls = matcher.version_class
- for k in versions:
- if k in ('urls', 'digests'):
- continue
- try:
- if not matcher.match(k):
- logger.debug('%s did not match %r', matcher, k)
- else:
- if prereleases or not vcls(k).is_prerelease:
- slist.append(k)
- else:
- logger.debug('skipping pre-release '
- 'version %s of %s', k, matcher.name)
- except Exception: # pragma: no cover
- logger.warning('error matching %s with %r', matcher, k)
- pass # slist.append(k)
- if len(slist) > 1:
- slist = sorted(slist, key=scheme.key)
- if slist:
- logger.debug('sorted list: %s', slist)
- version = slist[-1]
- result = versions[version]
- if result:
- if r.extras:
- result.extras = r.extras
- result.download_urls = versions.get('urls', {}).get(version, set())
- d = {}
- sd = versions.get('digests', {})
- for url in result.download_urls:
- if url in sd:
- d[url] = sd[url]
- result.digests = d
- self.matcher = None
- return result
-
-
-class PyPIRPCLocator(Locator):
- """
- This locator uses XML-RPC to locate distributions. It therefore
- cannot be used with simple mirrors (that only mirror file content).
- """
- def __init__(self, url, **kwargs):
- """
- Initialise an instance.
-
- :param url: The URL to use for XML-RPC.
- :param kwargs: Passed to the superclass constructor.
- """
- super(PyPIRPCLocator, self).__init__(**kwargs)
- self.base_url = url
- self.client = ServerProxy(url, timeout=3.0)
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- return set(self.client.list_packages())
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- versions = self.client.package_releases(name, True)
- for v in versions:
- urls = self.client.release_urls(name, v)
- data = self.client.release_data(name, v)
- metadata = Metadata(scheme=self.scheme)
- metadata.name = data['name']
- metadata.version = data['version']
- metadata.license = data.get('license')
- metadata.keywords = data.get('keywords', [])
- metadata.summary = data.get('summary')
- dist = Distribution(metadata)
- if urls:
- info = urls[0]
- metadata.source_url = info['url']
- dist.digest = self._get_digest(info)
- dist.locator = self
- result[v] = dist
- for info in urls:
- url = info['url']
- digest = self._get_digest(info)
- result['urls'].setdefault(v, set()).add(url)
- result['digests'][url] = digest
- return result
-
-class PyPIJSONLocator(Locator):
- """
- This locator uses PyPI's JSON interface. It's very limited in functionality
- and probably not worth using.
- """
- def __init__(self, url, **kwargs):
- super(PyPIJSONLocator, self).__init__(**kwargs)
- self.base_url = ensure_slash(url)
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- raise NotImplementedError('Not available from this locator')
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- url = urljoin(self.base_url, '%s/json' % quote(name))
- try:
- resp = self.opener.open(url)
- data = resp.read().decode() # for now
- d = json.loads(data)
- md = Metadata(scheme=self.scheme)
- data = d['info']
- md.name = data['name']
- md.version = data['version']
- md.license = data.get('license')
- md.keywords = data.get('keywords', [])
- md.summary = data.get('summary')
- dist = Distribution(md)
- dist.locator = self
- urls = d['urls']
- result[md.version] = dist
- for info in d['urls']:
- url = info['url']
- dist.download_urls.add(url)
- dist.digests[url] = self._get_digest(info)
- result['urls'].setdefault(md.version, set()).add(url)
- result['digests'][url] = self._get_digest(info)
- # Now get other releases
- for version, infos in d['releases'].items():
- if version == md.version:
- continue # already done
- omd = Metadata(scheme=self.scheme)
- omd.name = md.name
- omd.version = version
- odist = Distribution(omd)
- odist.locator = self
- result[version] = odist
- for info in infos:
- url = info['url']
- odist.download_urls.add(url)
- odist.digests[url] = self._get_digest(info)
- result['urls'].setdefault(version, set()).add(url)
- result['digests'][url] = self._get_digest(info)
-# for info in urls:
-# md.source_url = info['url']
-# dist.digest = self._get_digest(info)
-# dist.locator = self
-# for info in urls:
-# url = info['url']
-# result['urls'].setdefault(md.version, set()).add(url)
-# result['digests'][url] = self._get_digest(info)
- except Exception as e:
- self.errors.put(text_type(e))
- logger.exception('JSON fetch failed: %s', e)
- return result
-
-
-class Page(object):
- """
- This class represents a scraped HTML page.
- """
- # The following slightly hairy-looking regex just looks for the contents of
- # an anchor link, which has an attribute "href" either immediately preceded
- # or immediately followed by a "rel" attribute. The attribute values can be
- # declared with double quotes, single quotes or no quotes - which leads to
- # the length of the expression.
- _href = re.compile("""
-(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
-href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
-(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
-""", re.I | re.S | re.X)
-    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
-
- def __init__(self, data, url):
- """
- Initialise an instance with the Unicode page contents and the URL they
- came from.
- """
- self.data = data
- self.base_url = self.url = url
- m = self._base.search(self.data)
- if m:
- self.base_url = m.group(1)
-
- _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
-
- @cached_property
- def links(self):
- """
- Return the URLs of all the links on a page together with information
- about their "rel" attribute, for determining which ones to treat as
- downloads and which ones to queue for further scraping.
- """
- def clean(url):
- "Tidy up an URL."
- scheme, netloc, path, params, query, frag = urlparse(url)
- return urlunparse((scheme, netloc, quote(path),
- params, query, frag))
-
- result = set()
- for match in self._href.finditer(self.data):
- d = match.groupdict('')
- rel = (d['rel1'] or d['rel2'] or d['rel3'] or
- d['rel4'] or d['rel5'] or d['rel6'])
- url = d['url1'] or d['url2'] or d['url3']
- url = urljoin(self.base_url, url)
- url = unescape(url)
- url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
- result.add((url, rel))
- # We sort the result, hoping to bring the most recent versions
- # to the front
- result = sorted(result, key=lambda t: t[0], reverse=True)
- return result
-
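To illustrate the link extraction above, a minimal sketch (the HTML snippet
and URL are hypothetical; assumes the vendored module imports as
pip._vendor.distlib.locators):

    from pip._vendor.distlib.locators import Page

    html = '<a href="foo-1.0.tar.gz" rel="download">foo 1.0</a>'
    page = Page(html, 'https://example.com/simple/foo/')
    for url, rel in page.links:
        print(url, rel)
    # -> https://example.com/simple/foo/foo-1.0.tar.gz download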
-
-class SimpleScrapingLocator(Locator):
- """
- A locator which scrapes HTML pages to locate downloads for a distribution.
- This runs multiple threads to do the I/O; performance is at least as good
- as pip's PackageFinder, which works in an analogous fashion.
- """
-
- # These are used to deal with various Content-Encoding schemes.
- decoders = {
- 'deflate': zlib.decompress,
-        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
- 'none': lambda b: b,
- }
-
- def __init__(self, url, timeout=None, num_workers=10, **kwargs):
- """
- Initialise an instance.
- :param url: The root URL to use for scraping.
- :param timeout: The timeout, in seconds, to be applied to requests.
- This defaults to ``None`` (no timeout specified).
-        :param num_workers: The number of worker threads you want to do I/O.
-                            This defaults to 10.
- :param kwargs: Passed to the superclass.
- """
- super(SimpleScrapingLocator, self).__init__(**kwargs)
- self.base_url = ensure_slash(url)
- self.timeout = timeout
- self._page_cache = {}
- self._seen = set()
- self._to_fetch = queue.Queue()
- self._bad_hosts = set()
- self.skip_externals = False
- self.num_workers = num_workers
- self._lock = threading.RLock()
- # See issue #45: we need to be resilient when the locator is used
- # in a thread, e.g. with concurrent.futures. We can't use self._lock
- # as it is for coordinating our internal threads - the ones created
- # in _prepare_threads.
- self._gplock = threading.RLock()
-
- def _prepare_threads(self):
- """
- Threads are created only when get_project is called, and terminate
- before it returns. They are there primarily to parallelise I/O (i.e.
- fetching web pages).
- """
- self._threads = []
- for i in range(self.num_workers):
- t = threading.Thread(target=self._fetch)
- t.setDaemon(True)
- t.start()
- self._threads.append(t)
-
- def _wait_threads(self):
- """
- Tell all the threads to terminate (by sending a sentinel value) and
- wait for them to do so.
- """
- # Note that you need two loops, since you can't say which
- # thread will get each sentinel
- for t in self._threads:
- self._to_fetch.put(None) # sentinel
- for t in self._threads:
- t.join()
- self._threads = []
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- with self._gplock:
- self.result = result
- self.project_name = name
- url = urljoin(self.base_url, '%s/' % quote(name))
- self._seen.clear()
- self._page_cache.clear()
- self._prepare_threads()
- try:
- logger.debug('Queueing %s', url)
- self._to_fetch.put(url)
- self._to_fetch.join()
- finally:
- self._wait_threads()
- del self.result
- return result
-
- platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
- r'win(32|-amd64)|macosx-?\d+)\b', re.I)
-
- def _is_platform_dependent(self, url):
- """
-        Does a URL refer to a platform-specific download?
- """
- return self.platform_dependent.search(url)
-
- def _process_download(self, url):
- """
-        See if a URL is a suitable download for a project.
-
- If it is, register information in the result dictionary (for
- _get_project) about the specific version it's for.
-
- Note that the return value isn't actually used other than as a boolean
- value.
- """
- if self._is_platform_dependent(url):
- info = None
- else:
- info = self.convert_url_to_download_info(url, self.project_name)
- logger.debug('process_download: %s -> %s', url, info)
- if info:
- with self._lock: # needed because self.result is shared
- self._update_version_data(self.result, info)
- return info
-
- def _should_queue(self, link, referrer, rel):
- """
- Determine whether a link URL from a referring page and with a
- particular "rel" attribute should be queued for scraping.
- """
- scheme, netloc, path, _, _, _ = urlparse(link)
- if path.endswith(self.source_extensions + self.binary_extensions +
- self.excluded_extensions):
- result = False
- elif self.skip_externals and not link.startswith(self.base_url):
- result = False
- elif not referrer.startswith(self.base_url):
- result = False
- elif rel not in ('homepage', 'download'):
- result = False
- elif scheme not in ('http', 'https', 'ftp'):
- result = False
- elif self._is_platform_dependent(link):
- result = False
- else:
- host = netloc.split(':', 1)[0]
- if host.lower() == 'localhost':
- result = False
- else:
- result = True
- logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
- referrer, result)
- return result
-
- def _fetch(self):
- """
- Get a URL to fetch from the work queue, get the HTML page, examine its
- links for download candidates and candidates for further scraping.
-
- This is a handy method to run in a thread.
- """
- while True:
- url = self._to_fetch.get()
- try:
- if url:
- page = self.get_page(url)
- if page is None: # e.g. after an error
- continue
- for link, rel in page.links:
- if link not in self._seen:
- self._seen.add(link)
- if (not self._process_download(link) and
- self._should_queue(link, url, rel)):
- logger.debug('Queueing %s from %s', link, url)
- self._to_fetch.put(link)
- except Exception as e: # pragma: no cover
- self.errors.put(text_type(e))
- finally:
- # always do this, to avoid hangs :-)
- self._to_fetch.task_done()
- if not url:
- #logger.debug('Sentinel seen, quitting.')
- break
-
- def get_page(self, url):
- """
-        Get the HTML for a URL, possibly from an in-memory cache.
-
- XXX TODO Note: this cache is never actually cleared. It's assumed that
- the data won't get stale over the lifetime of a locator instance (not
- necessarily true for the default_locator).
- """
- # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
- scheme, netloc, path, _, _, _ = urlparse(url)
- if scheme == 'file' and os.path.isdir(url2pathname(path)):
- url = urljoin(ensure_slash(url), 'index.html')
-
- if url in self._page_cache:
- result = self._page_cache[url]
- logger.debug('Returning %s from cache: %s', url, result)
- else:
- host = netloc.split(':', 1)[0]
- result = None
- if host in self._bad_hosts:
- logger.debug('Skipping %s due to bad host %s', url, host)
- else:
- req = Request(url, headers={'Accept-encoding': 'identity'})
- try:
- logger.debug('Fetching %s', url)
- resp = self.opener.open(req, timeout=self.timeout)
- logger.debug('Fetched %s', url)
- headers = resp.info()
- content_type = headers.get('Content-Type', '')
- if HTML_CONTENT_TYPE.match(content_type):
- final_url = resp.geturl()
- data = resp.read()
- encoding = headers.get('Content-Encoding')
- if encoding:
- decoder = self.decoders[encoding] # fail if not found
- data = decoder(data)
- encoding = 'utf-8'
- m = CHARSET.search(content_type)
- if m:
- encoding = m.group(1)
- try:
- data = data.decode(encoding)
- except UnicodeError: # pragma: no cover
- data = data.decode('latin-1') # fallback
- result = Page(data, final_url)
- self._page_cache[final_url] = result
- except HTTPError as e:
- if e.code != 404:
- logger.exception('Fetch failed: %s: %s', url, e)
- except URLError as e: # pragma: no cover
- logger.exception('Fetch failed: %s: %s', url, e)
- with self._lock:
- self._bad_hosts.add(host)
- except Exception as e: # pragma: no cover
- logger.exception('Fetch failed: %s: %s', url, e)
- finally:
- self._page_cache[url] = result # even if None (failure)
- return result
-
-    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- result = set()
- page = self.get_page(self.base_url)
- if not page:
- raise DistlibException('Unable to get %s' % self.base_url)
- for match in self._distname_re.finditer(page.data):
- result.add(match.group(1))
- return result
-
-class DirectoryLocator(Locator):
- """
- This class locates distributions in a directory tree.
- """
-
- def __init__(self, path, **kwargs):
- """
- Initialise an instance.
- :param path: The root of the directory tree to search.
- :param kwargs: Passed to the superclass constructor,
- except for:
-                       * recursive - if True (the default), subdirectories are
-                         recursed into. If False, only the top-level directory
-                         is searched.
- """
- self.recursive = kwargs.pop('recursive', True)
- super(DirectoryLocator, self).__init__(**kwargs)
- path = os.path.abspath(path)
- if not os.path.isdir(path): # pragma: no cover
- raise DistlibException('Not a directory: %r' % path)
- self.base_dir = path
-
- def should_include(self, filename, parent):
- """
- Should a filename be considered as a candidate for a distribution
- archive? As well as the filename, the directory which contains it
- is provided, though not used by the current implementation.
- """
- return filename.endswith(self.downloadable_extensions)
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- for root, dirs, files in os.walk(self.base_dir):
- for fn in files:
- if self.should_include(fn, root):
- fn = os.path.join(root, fn)
- url = urlunparse(('file', '',
- pathname2url(os.path.abspath(fn)),
- '', '', ''))
- info = self.convert_url_to_download_info(url, name)
- if info:
- self._update_version_data(result, info)
- if not self.recursive:
- break
- return result
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- result = set()
- for root, dirs, files in os.walk(self.base_dir):
- for fn in files:
- if self.should_include(fn, root):
- fn = os.path.join(root, fn)
- url = urlunparse(('file', '',
- pathname2url(os.path.abspath(fn)),
- '', '', ''))
- info = self.convert_url_to_download_info(url, None)
- if info:
- result.add(info['name'])
- if not self.recursive:
- break
- return result
-
-class JSONLocator(Locator):
- """
- This locator uses special extended metadata (not available on PyPI) and is
- the basis of performant dependency resolution in distlib. Other locators
- require archive downloads before dependencies can be determined! As you
- might imagine, that can be slow.
- """
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- raise NotImplementedError('Not available from this locator')
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- data = get_project_data(name)
- if data:
- for info in data.get('files', []):
- if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
- continue
- # We don't store summary in project metadata as it makes
- # the data bigger for no benefit during dependency
- # resolution
- dist = make_dist(data['name'], info['version'],
- summary=data.get('summary',
- 'Placeholder for summary'),
- scheme=self.scheme)
- md = dist.metadata
- md.source_url = info['url']
- # TODO SHA256 digest
- if 'digest' in info and info['digest']:
- dist.digest = ('md5', info['digest'])
- md.dependencies = info.get('requirements', {})
- dist.exports = info.get('exports', {})
- result[dist.version] = dist
- result['urls'].setdefault(dist.version, set()).add(info['url'])
- return result
-
-class DistPathLocator(Locator):
- """
- This locator finds installed distributions in a path. It can be useful for
- adding to an :class:`AggregatingLocator`.
- """
- def __init__(self, distpath, **kwargs):
- """
- Initialise an instance.
-
- :param distpath: A :class:`DistributionPath` instance to search.
- """
- super(DistPathLocator, self).__init__(**kwargs)
- assert isinstance(distpath, DistributionPath)
- self.distpath = distpath
-
- def _get_project(self, name):
- dist = self.distpath.get_distribution(name)
- if dist is None:
- result = {'urls': {}, 'digests': {}}
- else:
- result = {
- dist.version: dist,
- 'urls': {dist.version: set([dist.source_url])},
- 'digests': {dist.version: set([None])}
- }
- return result
-
-
-class AggregatingLocator(Locator):
- """
- This class allows you to chain and/or merge a list of locators.
- """
- def __init__(self, *locators, **kwargs):
- """
- Initialise an instance.
-
- :param locators: The list of locators to search.
- :param kwargs: Passed to the superclass constructor,
- except for:
- * merge - if False (the default), the first successful
- search from any of the locators is returned. If True,
- the results from all locators are merged (this can be
- slow).
- """
- self.merge = kwargs.pop('merge', False)
- self.locators = locators
- super(AggregatingLocator, self).__init__(**kwargs)
-
- def clear_cache(self):
- super(AggregatingLocator, self).clear_cache()
- for locator in self.locators:
- locator.clear_cache()
-
- def _set_scheme(self, value):
- self._scheme = value
- for locator in self.locators:
- locator.scheme = value
-
- scheme = property(Locator.scheme.fget, _set_scheme)
-
- def _get_project(self, name):
- result = {}
- for locator in self.locators:
- d = locator.get_project(name)
- if d:
- if self.merge:
- files = result.get('urls', {})
- digests = result.get('digests', {})
- # next line could overwrite result['urls'], result['digests']
- result.update(d)
- df = result.get('urls')
- if files and df:
- for k, v in files.items():
- if k in df:
- df[k] |= v
- else:
- df[k] = v
- dd = result.get('digests')
- if digests and dd:
- dd.update(digests)
- else:
- # See issue #18. If any dists are found and we're looking
- # for specific constraints, we only return something if
- # a match is found. For example, if a DirectoryLocator
- # returns just foo (1.0) while we're looking for
- # foo (>= 2.0), we'll pretend there was nothing there so
- # that subsequent locators can be queried. Otherwise we
- # would just return foo (1.0) which would then lead to a
- # failure to find foo (>= 2.0), because other locators
- # weren't searched. Note that this only matters when
- # merge=False.
- if self.matcher is None:
- found = True
- else:
- found = False
- for k in d:
- if self.matcher.match(k):
- found = True
- break
- if found:
- result = d
- break
- return result
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- result = set()
- for locator in self.locators:
- try:
- result |= locator.get_distribution_names()
- except NotImplementedError:
- pass
- return result
-
-
-# We use a legacy scheme simply because most of the dists on PyPI use legacy
-# versions which don't conform to PEP 426 / PEP 440.
-default_locator = AggregatingLocator(
- JSONLocator(),
- SimpleScrapingLocator('https://pypi.python.org/simple/',
- timeout=3.0),
- scheme='legacy')
-
-locate = default_locator.locate
-
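For context, the locate() shortcut would be used roughly as below; a sketch
only, since it performs live network I/O and the pypi.python.org index
configured above has since been retired:

    from pip._vendor.distlib.locators import locate

    dist = locate('requests (>= 2.0)')  # queries the index over the network
    if dist is not None:
        print(dist.name_and_version, dist.source_url)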
-NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
-                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
-
-class DependencyFinder(object):
- """
- Locate dependencies for distributions.
- """
-
- def __init__(self, locator=None):
- """
- Initialise an instance, using the specified locator
- to locate distributions.
- """
- self.locator = locator or default_locator
- self.scheme = get_scheme(self.locator.scheme)
-
- def add_distribution(self, dist):
- """
- Add a distribution to the finder. This will update internal information
- about who provides what.
- :param dist: The distribution to add.
- """
- logger.debug('adding distribution %s', dist)
- name = dist.key
- self.dists_by_name[name] = dist
- self.dists[(name, dist.version)] = dist
- for p in dist.provides:
- name, version = parse_name_and_version(p)
- logger.debug('Add to provided: %s, %s, %s', name, version, dist)
- self.provided.setdefault(name, set()).add((version, dist))
-
- def remove_distribution(self, dist):
- """
- Remove a distribution from the finder. This will update internal
- information about who provides what.
- :param dist: The distribution to remove.
- """
- logger.debug('removing distribution %s', dist)
- name = dist.key
- del self.dists_by_name[name]
- del self.dists[(name, dist.version)]
- for p in dist.provides:
- name, version = parse_name_and_version(p)
- logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
- s = self.provided[name]
- s.remove((version, dist))
- if not s:
- del self.provided[name]
-
- def get_matcher(self, reqt):
- """
- Get a version matcher for a requirement.
- :param reqt: The requirement
- :type reqt: str
- :return: A version matcher (an instance of
- :class:`distlib.version.Matcher`).
- """
- try:
- matcher = self.scheme.matcher(reqt)
- except UnsupportedVersionError: # pragma: no cover
- # XXX compat-mode if cannot read the version
- name = reqt.split()[0]
- matcher = self.scheme.matcher(name)
- return matcher
-
- def find_providers(self, reqt):
- """
- Find the distributions which can fulfill a requirement.
-
- :param reqt: The requirement.
- :type reqt: str
- :return: A set of distribution which can fulfill the requirement.
- """
- matcher = self.get_matcher(reqt)
- name = matcher.key # case-insensitive
- result = set()
- provided = self.provided
- if name in provided:
- for version, provider in provided[name]:
- try:
- match = matcher.match(version)
- except UnsupportedVersionError:
- match = False
-
- if match:
- result.add(provider)
- break
- return result
-
- def try_to_replace(self, provider, other, problems):
- """
- Attempt to replace one provider with another. This is typically used
- when resolving dependencies from multiple sources, e.g. A requires
- (B >= 1.0) while C requires (B >= 1.1).
-
- For successful replacement, ``provider`` must meet all the requirements
- which ``other`` fulfills.
-
- :param provider: The provider we are trying to replace with.
- :param other: The provider we're trying to replace.
- :param problems: If False is returned, this will contain what
- problems prevented replacement. This is currently
- a tuple of the literal string 'cantreplace',
- ``provider``, ``other`` and the set of requirements
- that ``provider`` couldn't fulfill.
- :return: True if we can replace ``other`` with ``provider``, else
- False.
- """
- rlist = self.reqts[other]
- unmatched = set()
- for s in rlist:
- matcher = self.get_matcher(s)
- if not matcher.match(provider.version):
- unmatched.add(s)
- if unmatched:
- # can't replace other with provider
- problems.add(('cantreplace', provider, other,
- frozenset(unmatched)))
- result = False
- else:
- # can replace other with provider
- self.remove_distribution(other)
- del self.reqts[other]
- for s in rlist:
- self.reqts.setdefault(provider, set()).add(s)
- self.add_distribution(provider)
- result = True
- return result
-
- def find(self, requirement, meta_extras=None, prereleases=False):
- """
- Find a distribution and all distributions it depends on.
-
- :param requirement: The requirement specifying the distribution to
- find, or a Distribution instance.
- :param meta_extras: A list of meta extras such as :test:, :build: and
- so on.
- :param prereleases: If ``True``, allow pre-release versions to be
- returned - otherwise, don't return prereleases
- unless they're all that's available.
-
- Return a set of :class:`Distribution` instances and a set of
- problems.
-
- The distributions returned should be such that they have the
- :attr:`required` attribute set to ``True`` if they were
- from the ``requirement`` passed to ``find()``, and they have the
- :attr:`build_time_dependency` attribute set to ``True`` unless they
- are post-installation dependencies of the ``requirement``.
-
- The problems should be a tuple consisting of the string
- ``'unsatisfied'`` and the requirement which couldn't be satisfied
- by any distribution known to the locator.
- """
-
- self.provided = {}
- self.dists = {}
- self.dists_by_name = {}
- self.reqts = {}
-
- meta_extras = set(meta_extras or [])
- if ':*:' in meta_extras:
- meta_extras.remove(':*:')
- # :meta: and :run: are implicitly included
- meta_extras |= set([':test:', ':build:', ':dev:'])
-
- if isinstance(requirement, Distribution):
- dist = odist = requirement
- logger.debug('passed %s as requirement', odist)
- else:
- dist = odist = self.locator.locate(requirement,
- prereleases=prereleases)
- if dist is None:
- raise DistlibException('Unable to locate %r' % requirement)
- logger.debug('located %s', odist)
- dist.requested = True
- problems = set()
- todo = set([dist])
- install_dists = set([odist])
- while todo:
- dist = todo.pop()
- name = dist.key # case-insensitive
- if name not in self.dists_by_name:
- self.add_distribution(dist)
- else:
- #import pdb; pdb.set_trace()
- other = self.dists_by_name[name]
- if other != dist:
- self.try_to_replace(dist, other, problems)
-
- ireqts = dist.run_requires | dist.meta_requires
- sreqts = dist.build_requires
- ereqts = set()
- if dist in install_dists:
- for key in ('test', 'build', 'dev'):
- e = ':%s:' % key
- if e in meta_extras:
- ereqts |= getattr(dist, '%s_requires' % key)
- all_reqts = ireqts | sreqts | ereqts
- for r in all_reqts:
- providers = self.find_providers(r)
- if not providers:
- logger.debug('No providers found for %r', r)
- provider = self.locator.locate(r, prereleases=prereleases)
- # If no provider is found and we didn't consider
- # prereleases, consider them now.
- if provider is None and not prereleases:
- provider = self.locator.locate(r, prereleases=True)
- if provider is None:
- logger.debug('Cannot satisfy %r', r)
- problems.add(('unsatisfied', r))
- else:
- n, v = provider.key, provider.version
- if (n, v) not in self.dists:
- todo.add(provider)
- providers.add(provider)
- if r in ireqts and dist in install_dists:
- install_dists.add(provider)
- logger.debug('Adding %s to install_dists',
- provider.name_and_version)
- for p in providers:
- name = p.key
- if name not in self.dists_by_name:
- self.reqts.setdefault(p, set()).add(r)
- else:
- other = self.dists_by_name[name]
- if other != p:
- # see if other can be replaced by p
- self.try_to_replace(p, other, problems)
-
- dists = set(self.dists.values())
- for dist in dists:
- dist.build_time_dependency = dist not in install_dists
- if dist.build_time_dependency:
- logger.debug('%s is a build-time dependency only.',
- dist.name_and_version)
- logger.debug('find done for %s', odist)
- return dists, problems
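A hedged usage sketch for DependencyFinder (network-dependent, and the
requirement string is illustrative):

    from pip._vendor.distlib.locators import DependencyFinder

    finder = DependencyFinder()  # no locator given, so default_locator is used
    dists, problems = finder.find('requests (== 2.12.4)')
    for d in dists:
        print(d.name_and_version, d.build_time_dependency)
    for p in problems:
        print('problem:', p)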
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/manifest.py b/env/Lib/site-packages/pip/_vendor/distlib/manifest.py
deleted file mode 100644
index 9f03364..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/manifest.py
+++ /dev/null
@@ -1,393 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012-2013 Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""
-Class representing the list of files in a distribution.
-
-Equivalent to distutils.filelist, but fixes some problems.
-"""
-import fnmatch
-import logging
-import os
-import re
-import sys
-
-from . import DistlibException
-from .compat import fsdecode
-from .util import convert_path
-
-
-__all__ = ['Manifest']
-
-logger = logging.getLogger(__name__)
-
-# a \ followed by some spaces + EOL
-_COLLAPSE_PATTERN = re.compile('\\\w*\n', re.M)
-_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
-
-#
-# Due to the different results returned by fnmatch.translate, we need
-# to do slightly different processing for Python 2.7 and 3.2 ... this needed
-# to be brought in for Python 3.6 onwards.
-#
-_PYTHON_VERSION = sys.version_info[:2]
-
-class Manifest(object):
- """A list of files built by on exploring the filesystem and filtered by
- applying various patterns to what we find there.
- """
-
- def __init__(self, base=None):
- """
- Initialise an instance.
-
- :param base: The base directory to explore under.
- """
- self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
- self.prefix = self.base + os.sep
- self.allfiles = None
- self.files = set()
-
- #
- # Public API
- #
-
- def findall(self):
- """Find all files under the base and set ``allfiles`` to the absolute
- pathnames of files found.
- """
- from stat import S_ISREG, S_ISDIR, S_ISLNK
-
- self.allfiles = allfiles = []
- root = self.base
- stack = [root]
- pop = stack.pop
- push = stack.append
-
- while stack:
- root = pop()
- names = os.listdir(root)
-
- for name in names:
- fullname = os.path.join(root, name)
-
- # Avoid excess stat calls -- just one will do, thank you!
- stat = os.stat(fullname)
- mode = stat.st_mode
- if S_ISREG(mode):
- allfiles.append(fsdecode(fullname))
- elif S_ISDIR(mode) and not S_ISLNK(mode):
- push(fullname)
-
- def add(self, item):
- """
- Add a file to the manifest.
-
- :param item: The pathname to add. This can be relative to the base.
- """
- if not item.startswith(self.prefix):
- item = os.path.join(self.base, item)
- self.files.add(os.path.normpath(item))
-
- def add_many(self, items):
- """
- Add a list of files to the manifest.
-
- :param items: The pathnames to add. These can be relative to the base.
- """
- for item in items:
- self.add(item)
-
- def sorted(self, wantdirs=False):
- """
- Return sorted files in directory order
- """
-
- def add_dir(dirs, d):
- dirs.add(d)
- logger.debug('add_dir added %s', d)
- if d != self.base:
- parent, _ = os.path.split(d)
- assert parent not in ('', '/')
- add_dir(dirs, parent)
-
- result = set(self.files) # make a copy!
- if wantdirs:
- dirs = set()
- for f in result:
- add_dir(dirs, os.path.dirname(f))
- result |= dirs
- return [os.path.join(*path_tuple) for path_tuple in
- sorted(os.path.split(path) for path in result)]
-
- def clear(self):
- """Clear all collected files."""
- self.files = set()
- self.allfiles = []
-
- def process_directive(self, directive):
- """
- Process a directive which either adds some files from ``allfiles`` to
- ``files``, or removes some files from ``files``.
-
- :param directive: The directive to process. This should be in a format
- compatible with distutils ``MANIFEST.in`` files:
-
- http://docs.python.org/distutils/sourcedist.html#commands
- """
- # Parse the line: split it up, make sure the right number of words
- # is there, and return the relevant words. 'action' is always
- # defined: it's the first word of the line. Which of the other
- # three are defined depends on the action; it'll be either
- # patterns, (dir and patterns), or (dirpattern).
- action, patterns, thedir, dirpattern = self._parse_directive(directive)
-
- # OK, now we know that the action is valid and we have the
- # right number of words on the line for that action -- so we
- # can proceed with minimal error-checking.
- if action == 'include':
- for pattern in patterns:
- if not self._include_pattern(pattern, anchor=True):
- logger.warning('no files found matching %r', pattern)
-
- elif action == 'exclude':
- for pattern in patterns:
- found = self._exclude_pattern(pattern, anchor=True)
- #if not found:
- # logger.warning('no previously-included files '
- # 'found matching %r', pattern)
-
- elif action == 'global-include':
- for pattern in patterns:
- if not self._include_pattern(pattern, anchor=False):
- logger.warning('no files found matching %r '
- 'anywhere in distribution', pattern)
-
- elif action == 'global-exclude':
- for pattern in patterns:
- found = self._exclude_pattern(pattern, anchor=False)
- #if not found:
- # logger.warning('no previously-included files '
- # 'matching %r found anywhere in '
- # 'distribution', pattern)
-
- elif action == 'recursive-include':
- for pattern in patterns:
- if not self._include_pattern(pattern, prefix=thedir):
- logger.warning('no files found matching %r '
- 'under directory %r', pattern, thedir)
-
- elif action == 'recursive-exclude':
- for pattern in patterns:
- found = self._exclude_pattern(pattern, prefix=thedir)
- #if not found:
- # logger.warning('no previously-included files '
- # 'matching %r found under directory %r',
- # pattern, thedir)
-
- elif action == 'graft':
- if not self._include_pattern(None, prefix=dirpattern):
- logger.warning('no directories found matching %r',
- dirpattern)
-
- elif action == 'prune':
- if not self._exclude_pattern(None, prefix=dirpattern):
- logger.warning('no previously-included directories found '
- 'matching %r', dirpattern)
- else: # pragma: no cover
- # This should never happen, as it should be caught in
- # _parse_template_line
- raise DistlibException(
- 'invalid action %r' % action)
-
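A minimal sketch of driving process_directive() with MANIFEST.in-style lines
(the project root is hypothetical and must be an existing directory):

    from pip._vendor.distlib.manifest import Manifest

    m = Manifest('/path/to/project')  # hypothetical project root
    m.process_directive('include README.rst')
    m.process_directive('recursive-include src *.py')
    m.process_directive('global-exclude *.pyc')
    print(m.sorted())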
- #
- # Private API
- #
-
- def _parse_directive(self, directive):
- """
- Validate a directive.
- :param directive: The directive to validate.
- :return: A tuple of action, patterns, thedir, dir_patterns
- """
- words = directive.split()
- if len(words) == 1 and words[0] not in ('include', 'exclude',
- 'global-include',
- 'global-exclude',
- 'recursive-include',
- 'recursive-exclude',
- 'graft', 'prune'):
- # no action given, let's use the default 'include'
- words.insert(0, 'include')
-
- action = words[0]
- patterns = thedir = dir_pattern = None
-
- if action in ('include', 'exclude',
- 'global-include', 'global-exclude'):
- if len(words) < 2:
- raise DistlibException(
-                    '%r expects <pattern1> <pattern2> ...' % action)
-
- patterns = [convert_path(word) for word in words[1:]]
-
- elif action in ('recursive-include', 'recursive-exclude'):
- if len(words) < 3:
- raise DistlibException(
-                    '%r expects <dir> <pattern1> <pattern2> ...' % action)
-
- thedir = convert_path(words[1])
- patterns = [convert_path(word) for word in words[2:]]
-
- elif action in ('graft', 'prune'):
- if len(words) != 2:
- raise DistlibException(
-                    '%r expects a single <dir_pattern>' % action)
-
- dir_pattern = convert_path(words[1])
-
- else:
- raise DistlibException('unknown action %r' % action)
-
- return action, patterns, thedir, dir_pattern
-
- def _include_pattern(self, pattern, anchor=True, prefix=None,
- is_regex=False):
- """Select strings (presumably filenames) from 'self.files' that
- match 'pattern', a Unix-style wildcard (glob) pattern.
-
- Patterns are not quite the same as implemented by the 'fnmatch'
- module: '*' and '?' match non-special characters, where "special"
- is platform-dependent: slash on Unix; colon, slash, and backslash on
- DOS/Windows; and colon on Mac OS.
-
- If 'anchor' is true (the default), then the pattern match is more
- stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
- 'anchor' is false, both of these will match.
-
- If 'prefix' is supplied, then only filenames starting with 'prefix'
- (itself a pattern) and ending with 'pattern', with anything in between
- them, will match. 'anchor' is ignored in this case.
-
- If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
- 'pattern' is assumed to be either a string containing a regex or a
- regex object -- no translation is done, the regex is just compiled
- and used as-is.
-
- Selected strings will be added to self.files.
-
- Return True if files are found.
- """
- # XXX docstring lying about what the special chars are?
- found = False
- pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
-
- # delayed loading of allfiles list
- if self.allfiles is None:
- self.findall()
-
- for name in self.allfiles:
- if pattern_re.search(name):
- self.files.add(name)
- found = True
- return found
-
- def _exclude_pattern(self, pattern, anchor=True, prefix=None,
- is_regex=False):
- """Remove strings (presumably filenames) from 'files' that match
- 'pattern'.
-
- Other parameters are the same as for 'include_pattern()', above.
- The list 'self.files' is modified in place. Return True if files are
- found.
-
- This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
- packaging source distributions
- """
- found = False
- pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
- for f in list(self.files):
- if pattern_re.search(f):
- self.files.remove(f)
- found = True
- return found
-
- def _translate_pattern(self, pattern, anchor=True, prefix=None,
- is_regex=False):
- """Translate a shell-like wildcard pattern to a compiled regular
- expression.
-
- Return the compiled regex. If 'is_regex' true,
- then 'pattern' is directly compiled to a regex (if it's a string)
- or just returned as-is (assumes it's a regex object).
- """
- if is_regex:
- if isinstance(pattern, str):
- return re.compile(pattern)
- else:
- return pattern
-
- if _PYTHON_VERSION > (3, 2):
- # ditch start and end characters
- start, _, end = self._glob_to_re('_').partition('_')
-
- if pattern:
- pattern_re = self._glob_to_re(pattern)
- if _PYTHON_VERSION > (3, 2):
- assert pattern_re.startswith(start) and pattern_re.endswith(end)
- else:
- pattern_re = ''
-
- base = re.escape(os.path.join(self.base, ''))
- if prefix is not None:
- # ditch end of pattern character
- if _PYTHON_VERSION <= (3, 2):
- empty_pattern = self._glob_to_re('')
- prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
- else:
- prefix_re = self._glob_to_re(prefix)
- assert prefix_re.startswith(start) and prefix_re.endswith(end)
- prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
- sep = os.sep
- if os.sep == '\\':
- sep = r'\\'
- if _PYTHON_VERSION <= (3, 2):
- pattern_re = '^' + base + sep.join((prefix_re,
- '.*' + pattern_re))
- else:
- pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
- pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
- pattern_re, end)
- else: # no prefix -- respect anchor flag
- if anchor:
- if _PYTHON_VERSION <= (3, 2):
- pattern_re = '^' + base + pattern_re
- else:
- pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
-
- return re.compile(pattern_re)
-
- def _glob_to_re(self, pattern):
- """Translate a shell-like glob pattern to a regular expression.
-
- Return a string containing the regex. Differs from
- 'fnmatch.translate()' in that '*' does not match "special characters"
- (which are platform-specific).
- """
- pattern_re = fnmatch.translate(pattern)
-
- # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
- # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
- # and by extension they shouldn't match such "special characters" under
- # any OS. So change all non-escaped dots in the RE to match any
- # character except the special characters (currently: just os.sep).
- sep = os.sep
- if os.sep == '\\':
- # we're using a regex to manipulate a regex, so we need
- # to escape the backslash twice
- sep = r'\\\\'
- escaped = r'\1[^%s]' % sep
-        pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
-        return pattern_re
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/markers.py b/env/Lib/site-packages/pip/_vendor/distlib/markers.py
deleted file mode 100644
--- a/env/Lib/site-packages/pip/_vendor/distlib/markers.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012-2013 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""Parser for the environment markers micro-language defined in PEP 345."""
-
-import ast
-import os
-import sys
-import platform
-
-from .compat import python_implementation, string_types
-from .util import in_venv
-
-__all__ = ['interpret']
-
-
-class Evaluator(object):
-    """
-    A limited evaluator for Python expressions.
-    """
-
-    operators = {
-        'eq': lambda x, y: x == y,
-        'gt': lambda x, y: x > y,
- 'gte': lambda x, y: x >= y,
- 'in': lambda x, y: x in y,
- 'lt': lambda x, y: x < y,
- 'lte': lambda x, y: x <= y,
- 'not': lambda x: not x,
- 'noteq': lambda x, y: x != y,
- 'notin': lambda x, y: x not in y,
- }
-
- allowed_values = {
- 'sys_platform': sys.platform,
- 'python_version': '%s.%s' % sys.version_info[:2],
- # parsing sys.platform is not reliable, but there is no other
- # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
- 'python_full_version': sys.version.split(' ', 1)[0],
- 'os_name': os.name,
- 'platform_in_venv': str(in_venv()),
- 'platform_release': platform.release(),
- 'platform_version': platform.version(),
- 'platform_machine': platform.machine(),
- 'platform_python_implementation': python_implementation(),
- }
-
- def __init__(self, context=None):
- """
- Initialise an instance.
-
- :param context: If specified, names are looked up in this mapping.
- """
- self.context = context or {}
- self.source = None
-
- def get_fragment(self, offset):
- """
- Get the part of the source which is causing a problem.
- """
- fragment_len = 10
- s = '%r' % (self.source[offset:offset + fragment_len])
- if offset + fragment_len < len(self.source):
- s += '...'
- return s
-
- def get_handler(self, node_type):
- """
- Get a handler for the specified AST node type.
- """
- return getattr(self, 'do_%s' % node_type, None)
-
- def evaluate(self, node, filename=None):
- """
- Evaluate a source string or node, using ``filename`` when
- displaying errors.
- """
- if isinstance(node, string_types):
- self.source = node
- kwargs = {'mode': 'eval'}
- if filename:
- kwargs['filename'] = filename
- try:
- node = ast.parse(node, **kwargs)
- except SyntaxError as e:
- s = self.get_fragment(e.offset)
- raise SyntaxError('syntax error %s' % s)
- node_type = node.__class__.__name__.lower()
- handler = self.get_handler(node_type)
- if handler is None:
- if self.source is None:
- s = '(source not available)'
- else:
- s = self.get_fragment(node.col_offset)
- raise SyntaxError("don't know how to evaluate %r %s" % (
- node_type, s))
- return handler(node)
-
- def get_attr_key(self, node):
- assert isinstance(node, ast.Attribute), 'attribute node expected'
- return '%s.%s' % (node.value.id, node.attr)
-
- def do_attribute(self, node):
- if not isinstance(node.value, ast.Name):
- valid = False
- else:
- key = self.get_attr_key(node)
- valid = key in self.context or key in self.allowed_values
- if not valid:
- raise SyntaxError('invalid expression: %s' % key)
- if key in self.context:
- result = self.context[key]
- else:
- result = self.allowed_values[key]
- return result
-
- def do_boolop(self, node):
- result = self.evaluate(node.values[0])
- is_or = node.op.__class__ is ast.Or
- is_and = node.op.__class__ is ast.And
- assert is_or or is_and
- if (is_and and result) or (is_or and not result):
- for n in node.values[1:]:
- result = self.evaluate(n)
- if (is_or and result) or (is_and and not result):
- break
- return result
-
- def do_compare(self, node):
- def sanity_check(lhsnode, rhsnode):
- valid = True
- if isinstance(lhsnode, ast.Str) and isinstance(rhsnode, ast.Str):
- valid = False
- #elif (isinstance(lhsnode, ast.Attribute)
- # and isinstance(rhsnode, ast.Attribute)):
- # klhs = self.get_attr_key(lhsnode)
- # krhs = self.get_attr_key(rhsnode)
- # valid = klhs != krhs
- if not valid:
- s = self.get_fragment(node.col_offset)
- raise SyntaxError('Invalid comparison: %s' % s)
-
- lhsnode = node.left
- lhs = self.evaluate(lhsnode)
- result = True
- for op, rhsnode in zip(node.ops, node.comparators):
- sanity_check(lhsnode, rhsnode)
- op = op.__class__.__name__.lower()
- if op not in self.operators:
- raise SyntaxError('unsupported operation: %r' % op)
- rhs = self.evaluate(rhsnode)
- result = self.operators[op](lhs, rhs)
- if not result:
- break
- lhs = rhs
- lhsnode = rhsnode
- return result
-
- def do_expression(self, node):
- return self.evaluate(node.body)
-
- def do_name(self, node):
- valid = False
- if node.id in self.context:
- valid = True
- result = self.context[node.id]
- elif node.id in self.allowed_values:
- valid = True
- result = self.allowed_values[node.id]
- if not valid:
- raise SyntaxError('invalid expression: %s' % node.id)
- return result
-
- def do_str(self, node):
- return node.s
-
-
-def interpret(marker, execution_context=None):
- """
- Interpret a marker and return a result depending on environment.
-
- :param marker: The marker to interpret.
- :type marker: str
- :param execution_context: The context used for name lookup.
- :type execution_context: mapping
- """
- return Evaluator(execution_context).evaluate(marker.strip())
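A hedged sketch of calling interpret(); results depend on the running
interpreter, and since the evaluator above dispatches on ast.Str nodes it only
works on the Python versions this vendored copy targeted (before ast.Constant
replaced ast.Str in 3.8):

    from pip._vendor.distlib.markers import interpret

    print(interpret("python_version >= '2.7'"))
    print(interpret("'linux' in sys_platform"))
    print(interpret("extra == 'test'", {'extra': 'test'}))  # names resolve via the context mapping first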
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/metadata.py b/env/Lib/site-packages/pip/_vendor/distlib/metadata.py
deleted file mode 100644
index 75bfd68..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/metadata.py
+++ /dev/null
@@ -1,1068 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012 The Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""Implementation of the Metadata for Python packages PEPs.
-
-Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
-"""
-from __future__ import unicode_literals
-
-import codecs
-from email import message_from_file
-import json
-import logging
-import re
-
-
-from . import DistlibException, __version__
-from .compat import StringIO, string_types, text_type
-from .markers import interpret
-from .util import extract_by_key, get_extras
-from .version import get_scheme, PEP440_VERSION_RE
-
-logger = logging.getLogger(__name__)
-
-
-class MetadataMissingError(DistlibException):
- """A required metadata is missing"""
-
-
-class MetadataConflictError(DistlibException):
- """Attempt to read or write metadata fields that are conflictual."""
-
-
-class MetadataUnrecognizedVersionError(DistlibException):
- """Unknown metadata version number."""
-
-
-class MetadataInvalidError(DistlibException):
- """A metadata value is invalid"""
-
-# public API of this module
-__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
-
-# Encoding used for the PKG-INFO files
-PKG_INFO_ENCODING = 'utf-8'
-
-# preferred version. Hopefully will be changed
-# to 1.2 once PEP 345 is supported everywhere
-PKG_INFO_PREFERRED_VERSION = '1.1'
-
-_LINE_PREFIX_1_2 = re.compile('\n       \|')
-_LINE_PREFIX_PRE_1_2 = re.compile('\n       ')
-_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
- 'Summary', 'Description',
- 'Keywords', 'Home-page', 'Author', 'Author-email',
- 'License')
-
-_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
- 'Supported-Platform', 'Summary', 'Description',
- 'Keywords', 'Home-page', 'Author', 'Author-email',
- 'License', 'Classifier', 'Download-URL', 'Obsoletes',
- 'Provides', 'Requires')
-
-_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
- 'Download-URL')
-
-_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
- 'Supported-Platform', 'Summary', 'Description',
- 'Keywords', 'Home-page', 'Author', 'Author-email',
- 'Maintainer', 'Maintainer-email', 'License',
- 'Classifier', 'Download-URL', 'Obsoletes-Dist',
- 'Project-URL', 'Provides-Dist', 'Requires-Dist',
- 'Requires-Python', 'Requires-External')
-
-_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
- 'Obsoletes-Dist', 'Requires-External', 'Maintainer',
- 'Maintainer-email', 'Project-URL')
-
-_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
- 'Supported-Platform', 'Summary', 'Description',
- 'Keywords', 'Home-page', 'Author', 'Author-email',
- 'Maintainer', 'Maintainer-email', 'License',
- 'Classifier', 'Download-URL', 'Obsoletes-Dist',
- 'Project-URL', 'Provides-Dist', 'Requires-Dist',
- 'Requires-Python', 'Requires-External', 'Private-Version',
- 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
- 'Provides-Extra')
-
-_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
- 'Setup-Requires-Dist', 'Extension')
-
-_ALL_FIELDS = set()
-_ALL_FIELDS.update(_241_FIELDS)
-_ALL_FIELDS.update(_314_FIELDS)
-_ALL_FIELDS.update(_345_FIELDS)
-_ALL_FIELDS.update(_426_FIELDS)
-
-EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
-
-
-def _version2fieldlist(version):
- if version == '1.0':
- return _241_FIELDS
- elif version == '1.1':
- return _314_FIELDS
- elif version == '1.2':
- return _345_FIELDS
- elif version == '2.0':
- return _426_FIELDS
- raise MetadataUnrecognizedVersionError(version)
-
-
-def _best_version(fields):
- """Detect the best version depending on the fields used."""
- def _has_marker(keys, markers):
- for marker in markers:
- if marker in keys:
- return True
- return False
-
- keys = []
- for key, value in fields.items():
- if value in ([], 'UNKNOWN', None):
- continue
- keys.append(key)
-
- possible_versions = ['1.0', '1.1', '1.2', '2.0']
-
- # first let's try to see if a field is not part of one of the version
- for key in keys:
- if key not in _241_FIELDS and '1.0' in possible_versions:
- possible_versions.remove('1.0')
- if key not in _314_FIELDS and '1.1' in possible_versions:
- possible_versions.remove('1.1')
- if key not in _345_FIELDS and '1.2' in possible_versions:
- possible_versions.remove('1.2')
- if key not in _426_FIELDS and '2.0' in possible_versions:
- possible_versions.remove('2.0')
-
-    # possible_versions contains qualified versions
- if len(possible_versions) == 1:
- return possible_versions[0] # found !
- elif len(possible_versions) == 0:
- raise MetadataConflictError('Unknown metadata set')
-
- # let's see if one unique marker is found
- is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
- is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
- is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
- if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
- raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')
-
- # we have the choice, 1.0, or 1.2, or 2.0
- # - 1.0 has a broken Summary field but works with all tools
- # - 1.1 is to avoid
- # - 1.2 fixes Summary but has little adoption
- # - 2.0 adds more features and is very new
- if not is_1_1 and not is_1_2 and not is_2_0:
- # we couldn't find any specific marker
- if PKG_INFO_PREFERRED_VERSION in possible_versions:
- return PKG_INFO_PREFERRED_VERSION
- if is_1_1:
- return '1.1'
- if is_1_2:
- return '1.2'
-
- return '2.0'
-
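To make the selection rules concrete, a sketch assuming the helpers above are
in scope: Requires-Dist rules out 1.0 and 1.1, and the 1.2 marker then wins
over 2.0:

    fields = {'Name': 'foo', 'Version': '1.0',
              'Requires-Dist': ['bar (>= 1.0)']}
    print(_best_version(fields))  # -> '1.2'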
-_ATTR2FIELD = {
- 'metadata_version': 'Metadata-Version',
- 'name': 'Name',
- 'version': 'Version',
- 'platform': 'Platform',
- 'supported_platform': 'Supported-Platform',
- 'summary': 'Summary',
- 'description': 'Description',
- 'keywords': 'Keywords',
- 'home_page': 'Home-page',
- 'author': 'Author',
- 'author_email': 'Author-email',
- 'maintainer': 'Maintainer',
- 'maintainer_email': 'Maintainer-email',
- 'license': 'License',
- 'classifier': 'Classifier',
- 'download_url': 'Download-URL',
- 'obsoletes_dist': 'Obsoletes-Dist',
- 'provides_dist': 'Provides-Dist',
- 'requires_dist': 'Requires-Dist',
- 'setup_requires_dist': 'Setup-Requires-Dist',
- 'requires_python': 'Requires-Python',
- 'requires_external': 'Requires-External',
- 'requires': 'Requires',
- 'provides': 'Provides',
- 'obsoletes': 'Obsoletes',
- 'project_url': 'Project-URL',
- 'private_version': 'Private-Version',
- 'obsoleted_by': 'Obsoleted-By',
- 'extension': 'Extension',
- 'provides_extra': 'Provides-Extra',
-}
-
-_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
-_VERSIONS_FIELDS = ('Requires-Python',)
-_VERSION_FIELDS = ('Version',)
-_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
- 'Requires', 'Provides', 'Obsoletes-Dist',
- 'Provides-Dist', 'Requires-Dist', 'Requires-External',
- 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
- 'Provides-Extra', 'Extension')
-_LISTTUPLEFIELDS = ('Project-URL',)
-
-_ELEMENTSFIELD = ('Keywords',)
-
-_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
-
-_MISSING = object()
-
-_FILESAFE = re.compile('[^A-Za-z0-9.]+')
-
-
-def _get_name_and_version(name, version, for_filename=False):
- """Return the distribution name with version.
-
- If for_filename is true, return a filename-escaped form."""
- if for_filename:
- # For both name and version any runs of non-alphanumeric or '.'
- # characters are replaced with a single '-'. Additionally any
- # spaces in the version string become '.'
- name = _FILESAFE.sub('-', name)
- version = _FILESAFE.sub('-', version.replace(' ', '.'))
- return '%s-%s' % (name, version)
-
-
-class LegacyMetadata(object):
- """The legacy metadata of a release.
-
- Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
- instantiate the class with one of these arguments (or none):
- - *path*, the path to a metadata file
- - *fileobj*, a file-like object with metadata as content
- - *mapping*, a dict-like object of metadata
- - *scheme*, a version scheme name
- """
- # TODO document the mapping API and UNKNOWN default key
-
- def __init__(self, path=None, fileobj=None, mapping=None,
- scheme='default'):
- if [path, fileobj, mapping].count(None) < 2:
- raise TypeError('path, fileobj and mapping are exclusive')
- self._fields = {}
- self.requires_files = []
- self._dependencies = None
- self.scheme = scheme
- if path is not None:
- self.read(path)
- elif fileobj is not None:
- self.read_file(fileobj)
- elif mapping is not None:
- self.update(mapping)
- self.set_metadata_version()
-
- def set_metadata_version(self):
- self._fields['Metadata-Version'] = _best_version(self._fields)
-
- def _write_field(self, fileobj, name, value):
- fileobj.write('%s: %s\n' % (name, value))
-
- def __getitem__(self, name):
- return self.get(name)
-
- def __setitem__(self, name, value):
- return self.set(name, value)
-
- def __delitem__(self, name):
- field_name = self._convert_name(name)
- try:
- del self._fields[field_name]
- except KeyError:
- raise KeyError(name)
-
- def __contains__(self, name):
- return (name in self._fields or
- self._convert_name(name) in self._fields)
-
- def _convert_name(self, name):
- if name in _ALL_FIELDS:
- return name
- name = name.replace('-', '_').lower()
- return _ATTR2FIELD.get(name, name)
-
- def _default_value(self, name):
- if name in _LISTFIELDS or name in _ELEMENTSFIELD:
- return []
- return 'UNKNOWN'
-
- def _remove_line_prefix(self, value):
- if self.metadata_version in ('1.0', '1.1'):
- return _LINE_PREFIX_PRE_1_2.sub('\n', value)
- else:
- return _LINE_PREFIX_1_2.sub('\n', value)
-
- def __getattr__(self, name):
- if name in _ATTR2FIELD:
- return self[name]
- raise AttributeError(name)
-
- #
- # Public API
- #
-
-# dependencies = property(_get_dependencies, _set_dependencies)
-
- def get_fullname(self, filesafe=False):
- """Return the distribution name with version.
-
- If filesafe is true, return a filename-escaped form."""
- return _get_name_and_version(self['Name'], self['Version'], filesafe)
-
- def is_field(self, name):
- """return True if name is a valid metadata key"""
- name = self._convert_name(name)
- return name in _ALL_FIELDS
-
- def is_multi_field(self, name):
- name = self._convert_name(name)
- return name in _LISTFIELDS
-
- def read(self, filepath):
- """Read the metadata values from a file path."""
- fp = codecs.open(filepath, 'r', encoding='utf-8')
- try:
- self.read_file(fp)
- finally:
- fp.close()
-
- def read_file(self, fileob):
- """Read the metadata values from a file object."""
- msg = message_from_file(fileob)
- self._fields['Metadata-Version'] = msg['metadata-version']
-
- # When reading, get all the fields we can
- for field in _ALL_FIELDS:
- if field not in msg:
- continue
- if field in _LISTFIELDS:
- # we can have multiple lines
- values = msg.get_all(field)
- if field in _LISTTUPLEFIELDS and values is not None:
- values = [tuple(value.split(',')) for value in values]
- self.set(field, values)
- else:
- # single line
- value = msg[field]
- if value is not None and value != 'UNKNOWN':
- self.set(field, value)
- self.set_metadata_version()
-
- def write(self, filepath, skip_unknown=False):
- """Write the metadata fields to filepath."""
- fp = codecs.open(filepath, 'w', encoding='utf-8')
- try:
- self.write_file(fp, skip_unknown)
- finally:
- fp.close()
-
- def write_file(self, fileobject, skip_unknown=False):
- """Write the PKG-INFO format data to a file object."""
- self.set_metadata_version()
-
- for field in _version2fieldlist(self['Metadata-Version']):
- values = self.get(field)
- if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
- continue
- if field in _ELEMENTSFIELD:
- self._write_field(fileobject, field, ','.join(values))
- continue
- if field not in _LISTFIELDS:
- if field == 'Description':
- if self.metadata_version in ('1.0', '1.1'):
- values = values.replace('\n', '\n ')
- else:
- values = values.replace('\n', '\n |')
- values = [values]
-
- if field in _LISTTUPLEFIELDS:
- values = [','.join(value) for value in values]
-
- for value in values:
- self._write_field(fileobject, field, value)
-
- def update(self, other=None, **kwargs):
- """Set metadata values from the given iterable `other` and kwargs.
-
- Behavior is like `dict.update`: If `other` has a ``keys`` method,
- they are looped over and ``self[key]`` is assigned ``other[key]``.
- Else, ``other`` is an iterable of ``(key, value)`` iterables.
-
- Keys that don't match a metadata field or that have an empty value are
- dropped.
- """
- def _set(key, value):
- if key in _ATTR2FIELD and value:
- self.set(self._convert_name(key), value)
-
- if not other:
- # other is None or empty container
- pass
- elif hasattr(other, 'keys'):
- for k in other.keys():
- _set(k, other[k])
- else:
- for k, v in other:
- _set(k, v)
-
- if kwargs:
- for k, v in kwargs.items():
- _set(k, v)
-
- def set(self, name, value):
- """Control then set a metadata field."""
- name = self._convert_name(name)
-
- if ((name in _ELEMENTSFIELD or name == 'Platform') and
- not isinstance(value, (list, tuple))):
- if isinstance(value, string_types):
- value = [v.strip() for v in value.split(',')]
- else:
- value = []
- elif (name in _LISTFIELDS and
- not isinstance(value, (list, tuple))):
- if isinstance(value, string_types):
- value = [value]
- else:
- value = []
-
- if logger.isEnabledFor(logging.WARNING):
- project_name = self['Name']
-
- scheme = get_scheme(self.scheme)
- if name in _PREDICATE_FIELDS and value is not None:
- for v in value:
- # check that the values are valid
- if not scheme.is_valid_matcher(v.split(';')[0]):
- logger.warning(
- "'%s': '%s' is not valid (field '%s')",
- project_name, v, name)
- # FIXME this rejects UNKNOWN, is that right?
- elif name in _VERSIONS_FIELDS and value is not None:
- if not scheme.is_valid_constraint_list(value):
- logger.warning("'%s': '%s' is not a valid version (field '%s')",
- project_name, value, name)
- elif name in _VERSION_FIELDS and value is not None:
- if not scheme.is_valid_version(value):
- logger.warning("'%s': '%s' is not a valid version (field '%s')",
- project_name, value, name)
-
- if name in _UNICODEFIELDS:
- if name == 'Description':
- value = self._remove_line_prefix(value)
-
- self._fields[name] = value
-
- def get(self, name, default=_MISSING):
- """Get a metadata field."""
- name = self._convert_name(name)
- if name not in self._fields:
- if default is _MISSING:
- default = self._default_value(name)
- return default
- if name in _UNICODEFIELDS:
- value = self._fields[name]
- return value
- elif name in _LISTFIELDS:
- value = self._fields[name]
- if value is None:
- return []
- res = []
- for val in value:
- if name not in _LISTTUPLEFIELDS:
- res.append(val)
- else:
- # That's for Project-URL
- res.append((val[0], val[1]))
- return res
-
- elif name in _ELEMENTSFIELD:
- value = self._fields[name]
- if isinstance(value, string_types):
- return value.split(',')
- return self._fields[name]
-
- def check(self, strict=False):
- """Check if the metadata is compliant. If strict is True then raise if
- no Name or Version are provided"""
- self.set_metadata_version()
-
- # XXX should check the versions (if the file was loaded)
- missing, warnings = [], []
-
- for attr in ('Name', 'Version'): # required by PEP 345
- if attr not in self:
- missing.append(attr)
-
- if strict and missing != []:
- msg = 'missing required metadata: %s' % ', '.join(missing)
- raise MetadataMissingError(msg)
-
- for attr in ('Home-page', 'Author'):
- if attr not in self:
- missing.append(attr)
-
- # checking metadata 1.2 (XXX needs to check 1.1, 1.0)
- if self['Metadata-Version'] != '1.2':
- return missing, warnings
-
- scheme = get_scheme(self.scheme)
-
- def are_valid_constraints(value):
- for v in value:
- if not scheme.is_valid_matcher(v.split(';')[0]):
- return False
- return True
-
- for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
- (_VERSIONS_FIELDS,
- scheme.is_valid_constraint_list),
- (_VERSION_FIELDS,
- scheme.is_valid_version)):
- for field in fields:
- value = self.get(field, None)
- if value is not None and not controller(value):
- warnings.append("Wrong value for '%s': %s" % (field, value))
-
- return missing, warnings
-
- def todict(self, skip_missing=False):
- """Return fields as a dict.
-
- Field names will be converted to use the underscore-lowercase style
- instead of hyphen-mixed case (i.e. home_page instead of Home-page).
- """
- self.set_metadata_version()
-
- mapping_1_0 = (
- ('metadata_version', 'Metadata-Version'),
- ('name', 'Name'),
- ('version', 'Version'),
- ('summary', 'Summary'),
- ('home_page', 'Home-page'),
- ('author', 'Author'),
- ('author_email', 'Author-email'),
- ('license', 'License'),
- ('description', 'Description'),
- ('keywords', 'Keywords'),
- ('platform', 'Platform'),
- ('classifiers', 'Classifier'),
- ('download_url', 'Download-URL'),
- )
-
- data = {}
- for key, field_name in mapping_1_0:
- if not skip_missing or field_name in self._fields:
- data[key] = self[field_name]
-
- if self['Metadata-Version'] == '1.2':
- mapping_1_2 = (
- ('requires_dist', 'Requires-Dist'),
- ('requires_python', 'Requires-Python'),
- ('requires_external', 'Requires-External'),
- ('provides_dist', 'Provides-Dist'),
- ('obsoletes_dist', 'Obsoletes-Dist'),
- ('project_url', 'Project-URL'),
- ('maintainer', 'Maintainer'),
- ('maintainer_email', 'Maintainer-email'),
- )
- for key, field_name in mapping_1_2:
- if not skip_missing or field_name in self._fields:
- if key != 'project_url':
- data[key] = self[field_name]
- else:
- data[key] = [','.join(u) for u in self[field_name]]
-
- elif self['Metadata-Version'] == '1.1':
- mapping_1_1 = (
- ('provides', 'Provides'),
- ('requires', 'Requires'),
- ('obsoletes', 'Obsoletes'),
- )
- for key, field_name in mapping_1_1:
- if not skip_missing or field_name in self._fields:
- data[key] = self[field_name]
-
- return data
-
- def add_requirements(self, requirements):
- if self['Metadata-Version'] == '1.1':
- # we can't have 1.1 metadata *and* Setuptools requires
- for field in ('Obsoletes', 'Requires', 'Provides'):
- if field in self:
- del self[field]
- self['Requires-Dist'] += requirements
-
- # Mapping API
- # TODO could add iter* variants
-
- def keys(self):
- return list(_version2fieldlist(self['Metadata-Version']))
-
- def __iter__(self):
- for key in self.keys():
- yield key
-
- def values(self):
- return [self[key] for key in self.keys()]
-
- def items(self):
- return [(key, self[key]) for key in self.keys()]
-
- def __repr__(self):
- return '<%s %s %s>' % (self.__class__.__name__, self.name,
- self.version)
-
-
-METADATA_FILENAME = 'pydist.json'
-WHEEL_METADATA_FILENAME = 'metadata.json'
-
-
-class Metadata(object):
- """
- The metadata of a release. This implementation uses 2.0 (JSON)
- metadata where possible. If not possible, it wraps a LegacyMetadata
- instance which handles the key-value metadata format.
- """
-
- METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')
-
- NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
-
- VERSION_MATCHER = PEP440_VERSION_RE
-
- SUMMARY_MATCHER = re.compile('.{1,2047}')
-
- METADATA_VERSION = '2.0'
-
- GENERATOR = 'distlib (%s)' % __version__
-
- MANDATORY_KEYS = {
- 'name': (),
- 'version': (),
- 'summary': ('legacy',),
- }
-
- INDEX_KEYS = ('name version license summary description author '
- 'author_email keywords platform home_page classifiers '
- 'download_url')
-
- DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
- 'dev_requires provides meta_requires obsoleted_by '
- 'supports_environments')
-
- SYNTAX_VALIDATORS = {
- 'metadata_version': (METADATA_VERSION_MATCHER, ()),
- 'name': (NAME_MATCHER, ('legacy',)),
- 'version': (VERSION_MATCHER, ('legacy',)),
- 'summary': (SUMMARY_MATCHER, ('legacy',)),
- }
-
- __slots__ = ('_legacy', '_data', 'scheme')
-
- def __init__(self, path=None, fileobj=None, mapping=None,
- scheme='default'):
- if [path, fileobj, mapping].count(None) < 2:
- raise TypeError('path, fileobj and mapping are exclusive')
- self._legacy = None
- self._data = None
- self.scheme = scheme
- if mapping is not None:
- try:
- self._validate_mapping(mapping, scheme)
- self._data = mapping
- except MetadataUnrecognizedVersionError:
- self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
- self.validate()
- else:
- data = None
- if path:
- with open(path, 'rb') as f:
- data = f.read()
- elif fileobj:
- data = fileobj.read()
- if data is None:
- # Initialised with no args - to be added
- self._data = {
- 'metadata_version': self.METADATA_VERSION,
- 'generator': self.GENERATOR,
- }
- else:
- if not isinstance(data, text_type):
- data = data.decode('utf-8')
- try:
- self._data = json.loads(data)
- self._validate_mapping(self._data, scheme)
- except ValueError:
- # Note: MetadataUnrecognizedVersionError does not
- # inherit from ValueError (it's a DistlibException,
- # which should not inherit from ValueError).
- # The ValueError comes from the json.load - if that
- # succeeds and we get a validation error, we want
- # that to propagate
- self._legacy = LegacyMetadata(fileobj=StringIO(data),
- scheme=scheme)
- self.validate()
-
- common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
-
- none_list = (None, list)
- none_dict = (None, dict)
-
- mapped_keys = {
- 'run_requires': ('Requires-Dist', list),
- 'build_requires': ('Setup-Requires-Dist', list),
- 'dev_requires': none_list,
- 'test_requires': none_list,
- 'meta_requires': none_list,
- 'extras': ('Provides-Extra', list),
- 'modules': none_list,
- 'namespaces': none_list,
- 'exports': none_dict,
- 'commands': none_dict,
- 'classifiers': ('Classifier', list),
- 'source_url': ('Download-URL', None),
- 'metadata_version': ('Metadata-Version', None),
- }
-
- del none_list, none_dict
-
- def __getattribute__(self, key):
- common = object.__getattribute__(self, 'common_keys')
- mapped = object.__getattribute__(self, 'mapped_keys')
- if key in mapped:
- lk, maker = mapped[key]
- if self._legacy:
- if lk is None:
- result = None if maker is None else maker()
- else:
- result = self._legacy.get(lk)
- else:
- value = None if maker is None else maker()
- if key not in ('commands', 'exports', 'modules', 'namespaces',
- 'classifiers'):
- result = self._data.get(key, value)
- else:
- # special cases for PEP 459
- sentinel = object()
- result = sentinel
- d = self._data.get('extensions')
- if d:
- if key == 'commands':
- result = d.get('python.commands', value)
- elif key == 'classifiers':
- d = d.get('python.details')
- if d:
- result = d.get(key, value)
- else:
- d = d.get('python.exports')
- if not d:
- d = self._data.get('python.exports')
- if d:
- result = d.get(key, value)
- if result is sentinel:
- result = value
- elif key not in common:
- result = object.__getattribute__(self, key)
- elif self._legacy:
- result = self._legacy.get(key)
- else:
- result = self._data.get(key)
- return result
-
- def _validate_value(self, key, value, scheme=None):
- if key in self.SYNTAX_VALIDATORS:
- pattern, exclusions = self.SYNTAX_VALIDATORS[key]
- if (scheme or self.scheme) not in exclusions:
- m = pattern.match(value)
- if not m:
- raise MetadataInvalidError("'%s' is an invalid value for "
- "the '%s' property" % (value,
- key))
-
- def __setattr__(self, key, value):
- self._validate_value(key, value)
- common = object.__getattribute__(self, 'common_keys')
- mapped = object.__getattribute__(self, 'mapped_keys')
- if key in mapped:
- lk, _ = mapped[key]
- if self._legacy:
- if lk is None:
- raise NotImplementedError
- self._legacy[lk] = value
- elif key not in ('commands', 'exports', 'modules', 'namespaces',
- 'classifiers'):
- self._data[key] = value
- else:
- # special cases for PEP 459
- d = self._data.setdefault('extensions', {})
- if key == 'commands':
- d['python.commands'] = value
- elif key == 'classifiers':
- d = d.setdefault('python.details', {})
- d[key] = value
- else:
- d = d.setdefault('python.exports', {})
- d[key] = value
- elif key not in common:
- object.__setattr__(self, key, value)
- else:
- if key == 'keywords':
- if isinstance(value, string_types):
- value = value.strip()
- if value:
- value = value.split()
- else:
- value = []
- if self._legacy:
- self._legacy[key] = value
- else:
- self._data[key] = value
-
- @property
- def name_and_version(self):
- return _get_name_and_version(self.name, self.version, True)
-
- @property
- def provides(self):
- if self._legacy:
- result = self._legacy['Provides-Dist']
- else:
- result = self._data.setdefault('provides', [])
- s = '%s (%s)' % (self.name, self.version)
- if s not in result:
- result.append(s)
- return result
-
- @provides.setter
- def provides(self, value):
- if self._legacy:
- self._legacy['Provides-Dist'] = value
- else:
- self._data['provides'] = value
-
- def get_requirements(self, reqts, extras=None, env=None):
- """
- Base method to get dependencies, given a set of extras
- to satisfy and an optional environment context.
- :param reqts: A list of sometimes-wanted dependencies,
- perhaps dependent on extras and environment.
- :param extras: A list of optional components being requested.
- :param env: An optional environment for marker evaluation.
- """
- if self._legacy:
- result = reqts
- else:
- result = []
- extras = get_extras(extras or [], self.extras)
- for d in reqts:
- if 'extra' not in d and 'environment' not in d:
- # unconditional
- include = True
- else:
- if 'extra' not in d:
- # Not extra-dependent - only environment-dependent
- include = True
- else:
- include = d.get('extra') in extras
- if include:
- # Not excluded because of extras, check environment
- marker = d.get('environment')
- if marker:
- include = interpret(marker, env)
- if include:
- result.extend(d['requires'])
- for key in ('build', 'dev', 'test'):
- e = ':%s:' % key
- if e in extras:
- extras.remove(e)
- # A recursive call, but it should terminate since 'test'
- # has been removed from the extras
- reqts = self._data.get('%s_requires' % key, [])
- result.extend(self.get_requirements(reqts, extras=extras,
- env=env))
- return result
-
- @property
- def dictionary(self):
- if self._legacy:
- return self._from_legacy()
- return self._data
-
- @property
- def dependencies(self):
- if self._legacy:
- raise NotImplementedError
- else:
- return extract_by_key(self._data, self.DEPENDENCY_KEYS)
-
- @dependencies.setter
- def dependencies(self, value):
- if self._legacy:
- raise NotImplementedError
- else:
- self._data.update(value)
-
- def _validate_mapping(self, mapping, scheme):
- if mapping.get('metadata_version') != self.METADATA_VERSION:
- raise MetadataUnrecognizedVersionError()
- missing = []
- for key, exclusions in self.MANDATORY_KEYS.items():
- if key not in mapping:
- if scheme not in exclusions:
- missing.append(key)
- if missing:
- msg = 'Missing metadata items: %s' % ', '.join(missing)
- raise MetadataMissingError(msg)
- for k, v in mapping.items():
- self._validate_value(k, v, scheme)
-
- def validate(self):
- if self._legacy:
- missing, warnings = self._legacy.check(True)
- if missing or warnings:
- logger.warning('Metadata: missing: %s, warnings: %s',
- missing, warnings)
- else:
- self._validate_mapping(self._data, self.scheme)
-
- def todict(self):
- if self._legacy:
- return self._legacy.todict(True)
- else:
- result = extract_by_key(self._data, self.INDEX_KEYS)
- return result
-
- def _from_legacy(self):
- assert self._legacy and not self._data
- result = {
- 'metadata_version': self.METADATA_VERSION,
- 'generator': self.GENERATOR,
- }
- lmd = self._legacy.todict(True) # skip missing ones
- for k in ('name', 'version', 'license', 'summary', 'description',
- 'classifier'):
- if k in lmd:
- if k == 'classifier':
- nk = 'classifiers'
- else:
- nk = k
- result[nk] = lmd[k]
- kw = lmd.get('Keywords', [])
- if kw == ['']:
- kw = []
- result['keywords'] = kw
- keys = (('requires_dist', 'run_requires'),
- ('setup_requires_dist', 'build_requires'))
- for ok, nk in keys:
- if ok in lmd and lmd[ok]:
- result[nk] = [{'requires': lmd[ok]}]
- result['provides'] = self.provides
- author = {}
- maintainer = {}
- return result
-
- LEGACY_MAPPING = {
- 'name': 'Name',
- 'version': 'Version',
- 'license': 'License',
- 'summary': 'Summary',
- 'description': 'Description',
- 'classifiers': 'Classifier',
- }
-
- def _to_legacy(self):
- def process_entries(entries):
- reqts = set()
- for e in entries:
- extra = e.get('extra')
- env = e.get('environment')
- rlist = e['requires']
- for r in rlist:
- if not env and not extra:
- reqts.add(r)
- else:
- marker = ''
- if extra:
- marker = 'extra == "%s"' % extra
- if env:
- if marker:
- marker = '(%s) and %s' % (env, marker)
- else:
- marker = env
- reqts.add(';'.join((r, marker)))
- return reqts
-
- assert self._data and not self._legacy
- result = LegacyMetadata()
- nmd = self._data
- for nk, ok in self.LEGACY_MAPPING.items():
- if nk in nmd:
- result[ok] = nmd[nk]
- r1 = process_entries(self.run_requires + self.meta_requires)
- r2 = process_entries(self.build_requires + self.dev_requires)
- if self.extras:
- result['Provides-Extra'] = sorted(self.extras)
- result['Requires-Dist'] = sorted(r1)
- result['Setup-Requires-Dist'] = sorted(r2)
- # TODO: other fields such as contacts
- return result
-
- def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
- if [path, fileobj].count(None) != 1:
- raise ValueError('Exactly one of path and fileobj is needed')
- self.validate()
- if legacy:
- if self._legacy:
- legacy_md = self._legacy
- else:
- legacy_md = self._to_legacy()
- if path:
- legacy_md.write(path, skip_unknown=skip_unknown)
- else:
- legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
- else:
- if self._legacy:
- d = self._from_legacy()
- else:
- d = self._data
- if fileobj:
- json.dump(d, fileobj, ensure_ascii=True, indent=2,
- sort_keys=True)
- else:
- with codecs.open(path, 'w', 'utf-8') as f:
- json.dump(d, f, ensure_ascii=True, indent=2,
- sort_keys=True)
-
- def add_requirements(self, requirements):
- if self._legacy:
- self._legacy.add_requirements(requirements)
- else:
- run_requires = self._data.setdefault('run_requires', [])
- always = None
- for entry in run_requires:
- if 'environment' not in entry and 'extra' not in entry:
- always = entry
- break
- if always is None:
- always = { 'requires': requirements }
- run_requires.insert(0, always)
- else:
- rset = set(always['requires']) | set(requirements)
- always['requires'] = sorted(rset)
-
- def __repr__(self):
- name = self.name or '(no name)'
- version = self.version or 'no version'
- return '<%s %s %s (%s)>' % (self.__class__.__name__,
- self.metadata_version, name, version)
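
For context, the two classes above form the public surface of distlib's
metadata handling: Metadata prefers 2.0 (JSON) metadata and falls back to
LegacyMetadata for the key-value PKG-INFO format. A minimal usage sketch,
assuming distlib is importable (pip vendors it as pip._vendor.distlib);
the distribution name and file paths are illustrative:

from distlib.metadata import Metadata, LegacyMetadata

md = Metadata()                        # starts as empty 2.0 (JSON) metadata
md.name = 'example-dist'               # checked against NAME_MATCHER
md.version = '1.0.0'                   # checked against PEP440_VERSION_RE
md.summary = 'An example distribution'
md.add_requirements(['requests (>=2.0)'])

md.write(path='pydist.json')               # JSON form
md.write(path='PKG-INFO', legacy=True)     # converted via _to_legacy()

legacy = LegacyMetadata(path='PKG-INFO')   # metadata version auto-detected
print(legacy['Metadata-Version'], legacy['Requires-Dist'])
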
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/resources.py b/env/Lib/site-packages/pip/_vendor/distlib/resources.py
deleted file mode 100644
index f07cde2..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/resources.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2013-2016 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-from __future__ import unicode_literals
-
-import bisect
-import io
-import logging
-import os
-import pkgutil
-import shutil
-import sys
-import types
-import zipimport
-
-from . import DistlibException
-from .util import cached_property, get_cache_base, path_to_cache_dir, Cache
-
-logger = logging.getLogger(__name__)
-
-
-cache = None # created when needed
-
-
-class ResourceCache(Cache):
- def __init__(self, base=None):
- if base is None:
- # Use native string to avoid issues on 2.x: see Python #20140.
- base = os.path.join(get_cache_base(), str('resource-cache'))
- super(ResourceCache, self).__init__(base)
-
- def is_stale(self, resource, path):
- """
- Is the cache stale for the given resource?
-
- :param resource: The :class:`Resource` being cached.
- :param path: The path of the resource in the cache.
- :return: True if the cache is stale.
- """
- # Cache invalidation is a hard problem :-)
- return True
-
- def get(self, resource):
- """
- Get a resource into the cache.
-
- :param resource: A :class:`Resource` instance.
- :return: The pathname of the resource in the cache.
- """
- prefix, path = resource.finder.get_cache_info(resource)
- if prefix is None:
- result = path
- else:
- result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
- dirname = os.path.dirname(result)
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
- if not os.path.exists(result):
- stale = True
- else:
- stale = self.is_stale(resource, path)
- if stale:
- # write the bytes of the resource to the cache location
- with open(result, 'wb') as f:
- f.write(resource.bytes)
- return result
-
-
-class ResourceBase(object):
- def __init__(self, finder, name):
- self.finder = finder
- self.name = name
-
-
-class Resource(ResourceBase):
- """
- A class representing an in-package resource, such as a data file. This is
- not normally instantiated by user code, but rather by a
- :class:`ResourceFinder` which manages the resource.
- """
- is_container = False # Backwards compatibility
-
- def as_stream(self):
- """
- Get the resource as a stream.
-
- This is not a property to make it obvious that it returns a new stream
- each time.
- """
- return self.finder.get_stream(self)
-
- @cached_property
- def file_path(self):
- global cache
- if cache is None:
- cache = ResourceCache()
- return cache.get(self)
-
- @cached_property
- def bytes(self):
- return self.finder.get_bytes(self)
-
- @cached_property
- def size(self):
- return self.finder.get_size(self)
-
-
-class ResourceContainer(ResourceBase):
- is_container = True # Backwards compatibility
-
- @cached_property
- def resources(self):
- return self.finder.get_resources(self)
-
-
-class ResourceFinder(object):
- """
- Resource finder for file system resources.
- """
-
- if sys.platform.startswith('java'):
- skipped_extensions = ('.pyc', '.pyo', '.class')
- else:
- skipped_extensions = ('.pyc', '.pyo')
-
- def __init__(self, module):
- self.module = module
- self.loader = getattr(module, '__loader__', None)
- self.base = os.path.dirname(getattr(module, '__file__', ''))
-
- def _adjust_path(self, path):
- return os.path.realpath(path)
-
- def _make_path(self, resource_name):
- # Issue #50: need to preserve type of path on Python 2.x
- # like os.path._get_sep
- if isinstance(resource_name, bytes): # should only happen on 2.x
- sep = b'/'
- else:
- sep = '/'
- parts = resource_name.split(sep)
- parts.insert(0, self.base)
- result = os.path.join(*parts)
- return self._adjust_path(result)
-
- def _find(self, path):
- return os.path.exists(path)
-
- def get_cache_info(self, resource):
- return None, resource.path
-
- def find(self, resource_name):
- path = self._make_path(resource_name)
- if not self._find(path):
- result = None
- else:
- if self._is_directory(path):
- result = ResourceContainer(self, resource_name)
- else:
- result = Resource(self, resource_name)
- result.path = path
- return result
-
- def get_stream(self, resource):
- return open(resource.path, 'rb')
-
- def get_bytes(self, resource):
- with open(resource.path, 'rb') as f:
- return f.read()
-
- def get_size(self, resource):
- return os.path.getsize(resource.path)
-
- def get_resources(self, resource):
- def allowed(f):
- return (f != '__pycache__' and not
- f.endswith(self.skipped_extensions))
- return set([f for f in os.listdir(resource.path) if allowed(f)])
-
- def is_container(self, resource):
- return self._is_directory(resource.path)
-
- _is_directory = staticmethod(os.path.isdir)
-
- def iterator(self, resource_name):
- resource = self.find(resource_name)
- if resource is not None:
- todo = [resource]
- while todo:
- resource = todo.pop(0)
- yield resource
- if resource.is_container:
- rname = resource.name
- for name in resource.resources:
- if not rname:
- new_name = name
- else:
- new_name = '/'.join([rname, name])
- child = self.find(new_name)
- if child.is_container:
- todo.append(child)
- else:
- yield child
-
-
-class ZipResourceFinder(ResourceFinder):
- """
- Resource finder for resources in .zip files.
- """
- def __init__(self, module):
- super(ZipResourceFinder, self).__init__(module)
- archive = self.loader.archive
- self.prefix_len = 1 + len(archive)
- # PyPy doesn't have a _files attr on zipimporter, and you can't set one
- if hasattr(self.loader, '_files'):
- self._files = self.loader._files
- else:
- self._files = zipimport._zip_directory_cache[archive]
- self.index = sorted(self._files)
-
- def _adjust_path(self, path):
- return path
-
- def _find(self, path):
- path = path[self.prefix_len:]
- if path in self._files:
- result = True
- else:
- if path and path[-1] != os.sep:
- path = path + os.sep
- i = bisect.bisect(self.index, path)
- try:
- result = self.index[i].startswith(path)
- except IndexError:
- result = False
- if not result:
- logger.debug('_find failed: %r %r', path, self.loader.prefix)
- else:
- logger.debug('_find worked: %r %r', path, self.loader.prefix)
- return result
-
- def get_cache_info(self, resource):
- prefix = self.loader.archive
- path = resource.path[1 + len(prefix):]
- return prefix, path
-
- def get_bytes(self, resource):
- return self.loader.get_data(resource.path)
-
- def get_stream(self, resource):
- return io.BytesIO(self.get_bytes(resource))
-
- def get_size(self, resource):
- path = resource.path[self.prefix_len:]
- return self._files[path][3]
-
- def get_resources(self, resource):
- path = resource.path[self.prefix_len:]
- if path and path[-1] != os.sep:
- path += os.sep
- plen = len(path)
- result = set()
- i = bisect.bisect(self.index, path)
- while i < len(self.index):
- if not self.index[i].startswith(path):
- break
- s = self.index[i][plen:]
- result.add(s.split(os.sep, 1)[0]) # only immediate children
- i += 1
- return result
-
- def _is_directory(self, path):
- path = path[self.prefix_len:]
- if path and path[-1] != os.sep:
- path += os.sep
- i = bisect.bisect(self.index, path)
- try:
- result = self.index[i].startswith(path)
- except IndexError:
- result = False
- return result
-
-_finder_registry = {
- type(None): ResourceFinder,
- zipimport.zipimporter: ZipResourceFinder
-}
-
-try:
- # In Python 3.6, _frozen_importlib -> _frozen_importlib_external
- try:
- import _frozen_importlib_external as _fi
- except ImportError:
- import _frozen_importlib as _fi
- _finder_registry[_fi.SourceFileLoader] = ResourceFinder
- _finder_registry[_fi.FileFinder] = ResourceFinder
- del _fi
-except (ImportError, AttributeError):
- pass
-
-
-def register_finder(loader, finder_maker):
- _finder_registry[type(loader)] = finder_maker
-
-_finder_cache = {}
-
-
-def finder(package):
- """
- Return a resource finder for a package.
- :param package: The name of the package.
- :return: A :class:`ResourceFinder` instance for the package.
- """
- if package in _finder_cache:
- result = _finder_cache[package]
- else:
- if package not in sys.modules:
- __import__(package)
- module = sys.modules[package]
- path = getattr(module, '__path__', None)
- if path is None:
- raise DistlibException('You cannot get a finder for a module, '
- 'only for a package')
- loader = getattr(module, '__loader__', None)
- finder_maker = _finder_registry.get(type(loader))
- if finder_maker is None:
- raise DistlibException('Unable to locate finder for %r' % package)
- result = finder_maker(module)
- _finder_cache[package] = result
- return result
-
-
-_dummy_module = types.ModuleType(str('__dummy__'))
-
-
-def finder_for_path(path):
- """
- Return a resource finder for a path, which should represent a container.
-
- :param path: The path.
- :return: A :class:`ResourceFinder` instance for the path.
- """
- result = None
- # calls any path hooks, gets importer into cache
- pkgutil.get_importer(path)
- loader = sys.path_importer_cache.get(path)
- finder = _finder_registry.get(type(loader))
- if finder:
- module = _dummy_module
- module.__file__ = os.path.join(path, '')
- module.__loader__ = loader
- result = finder(module)
- return result
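
For context, the usual entry point to the machinery above is the
module-level finder() function. A minimal sketch, assuming distlib is
importable; 'mypackage' and 'data.json' are hypothetical names:

from distlib.resources import finder

f = finder('mypackage')             # imports the package if necessary
resource = f.find('data.json')      # Resource, ResourceContainer or None
if resource is not None and not resource.is_container:
    print(resource.size)            # size/bytes/stream are finder-delegated
    with resource.as_stream() as stream:
        print(stream.read(64))
    # resource.file_path materialises zip-borne resources through
    # ResourceCache before handing back a real filesystem path
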
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/scripts.py b/env/Lib/site-packages/pip/_vendor/distlib/scripts.py
deleted file mode 100644
index 792fc2e..0000000
--- a/env/Lib/site-packages/pip/_vendor/distlib/scripts.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2013-2015 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-from io import BytesIO
-import logging
-import os
-import re
-import struct
-import sys
-
-from .compat import sysconfig, detect_encoding, ZipFile
-from .resources import finder
-from .util import (FileOperator, get_export_entry, convert_path,
- get_executable, in_venv)
-
-logger = logging.getLogger(__name__)
-
-_DEFAULT_MANIFEST = '''
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
- <assemblyIdentity version="1.0.0.0"
- processorArchitecture="X86"
- name="%s"
- type="win32"/>
-
- <!-- Identify the application security requirements. -->
- <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
- <security>
- <requestedPrivileges>
- <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
- </requestedPrivileges>
- </security>
- </trustInfo>
-</assembly>'''.strip()
-
-# check if Python is called on the first line with this expression
-FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
-SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
-if __name__ == '__main__':
- import sys, re
-
- def _resolve(module, func):
- __import__(module)
- mod = sys.modules[module]
- parts = func.split('.')
- result = getattr(mod, parts.pop(0))
- for p in parts:
- result = getattr(result, p)
- return result
-
- try:
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-
- func = _resolve('%(module)s', '%(func)s')
- rc = func() # None interpreted as 0
- except Exception as e: # only supporting Python >= 2.6
- sys.stderr.write('%%s\\n' %% e)
- rc = 1
- sys.exit(rc)
-'''
-
-
-def _enquote_executable(executable):
- if ' ' in executable:
- # make sure we quote only the executable in case of env:
- # for example /usr/bin/env "/dir with spaces/bin/jython"
- # rather than "/usr/bin/env /dir with spaces/bin/jython",
- # otherwise the whole command line would be quoted
- if executable.startswith('/usr/bin/env '):
- env, _executable = executable.split(' ', 1)
- if ' ' in _executable and not _executable.startswith('"'):
- executable = '%s "%s"' % (env, _executable)
- else:
- if not executable.startswith('"'):
- executable = '"%s"' % executable
- return executable
-
-
-class ScriptMaker(object):
- """
- A class to copy or create scripts from source scripts or callable
- specifications.
- """
- script_template = SCRIPT_TEMPLATE
-
- executable = None # for shebangs
-
- def __init__(self, source_dir, target_dir, add_launchers=True,
- dry_run=False, fileop=None):
- self.source_dir = source_dir
- self.target_dir = target_dir
- self.add_launchers = add_launchers
- self.force = False
- self.clobber = False
- # It only makes sense to set mode bits on POSIX.
- self.set_mode = (os.name == 'posix') or (os.name == 'java' and
- os._name == 'posix')
- self.variants = set(('', 'X.Y'))
- self._fileop = fileop or FileOperator(dry_run)
-
- self._is_nt = os.name == 'nt' or (
- os.name == 'java' and os._name == 'nt')
-
- def _get_alternate_executable(self, executable, options):
- if options.get('gui', False) and self._is_nt: # pragma: no cover
- dn, fn = os.path.split(executable)
- fn = fn.replace('python', 'pythonw')
- executable = os.path.join(dn, fn)
- return executable
-
- if sys.platform.startswith('java'): # pragma: no cover
- def _is_shell(self, executable):
- """
- Determine if the specified executable is a script
- (contains a #! line)
- """
- try:
- with open(executable) as fp:
- return fp.read(2) == '#!'
- except (OSError, IOError):
- logger.warning('Failed to open %s', executable)
- return False
-
- def _fix_jython_executable(self, executable):
- if self._is_shell(executable):
- # Workaround for Jython is not needed on Linux systems.
- import java
-
- if java.lang.System.getProperty('os.name') == 'Linux':
- return executable
- elif executable.lower().endswith('jython.exe'):
- # Use wrapper exe for Jython on Windows
- return executable
- return '/usr/bin/env %s' % executable
-
- def _get_shebang(self, encoding, post_interp=b'', options=None):
- enquote = True
- if self.executable:
- executable = self.executable
- enquote = False # assume this will be taken care of
- elif not sysconfig.is_python_build():
- executable = get_executable()
- elif in_venv(): # pragma: no cover
- executable = os.path.join(sysconfig.get_path('scripts'),
- 'python%s' % sysconfig.get_config_var('EXE'))
- else: # pragma: no cover
- executable = os.path.join(
- sysconfig.get_config_var('BINDIR'),
- 'python%s%s' % (sysconfig.get_config_var('VERSION'),
- sysconfig.get_config_var('EXE')))
- if options:
- executable = self._get_alternate_executable(executable, options)
-
- if sys.platform.startswith('java'): # pragma: no cover
- executable = self._fix_jython_executable(executable)
- # Normalise case for Windows
- executable = os.path.normcase(executable)
- # If the user didn't specify an executable, it may be necessary to
- # cater for executable paths with spaces (not uncommon on Windows)
- if enquote:
- executable = _enquote_executable(executable)
- # Issue #51: don't use fsencode, since we later try to
- # check that the shebang is decodable using utf-8.
- executable = executable.encode('utf-8')
- # in case of IronPython, play safe and enable frames support
- if (sys.platform == 'cli' and '-X:Frames' not in post_interp
- and '-X:FullFrames' not in post_interp): # pragma: no cover
- post_interp += b' -X:Frames'
- shebang = b'#!' + executable + post_interp + b'\n'
- # The Python parser reads a script as UTF-8 until it finds a
- # #coding:xxx cookie, and the shebang must appear on the first
- # line, before any such cookie. So the shebang has to be
- # decodable from UTF-8.
- try:
- shebang.decode('utf-8')
- except UnicodeDecodeError: # pragma: no cover
- raise ValueError(
- 'The shebang (%r) is not decodable from utf-8' % shebang)
- # If the script is encoded to a custom encoding (use a
- # #coding:xxx cookie), the shebang has to be decodable from
- # the script encoding too.
- if encoding != 'utf-8':
- try:
- shebang.decode(encoding)
- except UnicodeDecodeError: # pragma: no cover
- raise ValueError(
- 'The shebang (%r) is not decodable '
- 'from the script encoding (%r)' % (shebang, encoding))
- return shebang
-
- def _get_script_text(self, entry):
- return self.script_template % dict(module=entry.prefix,
- func=entry.suffix)
-
- manifest = _DEFAULT_MANIFEST
-
- def get_manifest(self, exename):
- base = os.path.basename(exename)
- return self.manifest % base
-
- def _write_script(self, names, shebang, script_bytes, filenames, ext):
- use_launcher = self.add_launchers and self._is_nt
- linesep = os.linesep.encode('utf-8')
- if not use_launcher:
- script_bytes = shebang + linesep + script_bytes
- else: # pragma: no cover
- if ext == 'py':
- launcher = self._get_launcher('t')
- else:
- launcher = self._get_launcher('w')
- stream = BytesIO()
- with ZipFile(stream, 'w') as zf:
- zf.writestr('__main__.py', script_bytes)
- zip_data = stream.getvalue()
- script_bytes = launcher + shebang + linesep + zip_data
- for name in names:
- outname = os.path.join(self.target_dir, name)
- if use_launcher: # pragma: no cover
- n, e = os.path.splitext(outname)
- if e.startswith('.py'):
- outname = n
- outname = '%s.exe' % outname
- try:
- self._fileop.write_binary_file(outname, script_bytes)
- except Exception:
- # Failed writing an executable - it might be in use.
- logger.warning('Failed to write executable - trying to '
- 'use .deleteme logic')
- dfname = '%s.deleteme' % outname
- if os.path.exists(dfname):
- os.remove(dfname) # Not allowed to fail here
- os.rename(outname, dfname) # nor here
- self._fileop.write_binary_file(outname, script_bytes)
- logger.debug('Able to replace executable using '
- '.deleteme logic')
- try:
- os.remove(dfname)
- except Exception:
- pass # still in use - ignore error
- else:
- if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
- outname = '%s.%s' % (outname, ext)
- if os.path.exists(outname) and not self.clobber:
- logger.warning('Skipping existing file %s', outname)
- continue
- self._fileop.write_binary_file(outname, script_bytes)
- if self.set_mode:
- self._fileop.set_executable_mode([outname])
- filenames.append(outname)
-
- def _make_script(self, entry, filenames, options=None):
- post_interp = b''
- if options:
- args = options.get('interpreter_args', [])
- if args:
- args = ' %s' % ' '.join(args)
- post_interp = args.encode('utf-8')
- shebang = self._get_shebang('utf-8', post_interp, options=options)
- script = self._get_script_text(entry).encode('utf-8')
- name = entry.name
- scriptnames = set()
- if '' in self.variants:
- scriptnames.add(name)
- if 'X' in self.variants:
- scriptnames.add('%s%s' % (name, sys.version[0]))
- if 'X.Y' in self.variants:
- scriptnames.add('%s-%s' % (name, sys.version[:3]))
- if options and options.get('gui', False):
- ext = 'pyw'
- else:
- ext = 'py'
- self._write_script(scriptnames, shebang, script, filenames, ext)
-
- def _copy_script(self, script, filenames):
- adjust = False
- script = os.path.join(self.source_dir, convert_path(script))
- outname = os.path.join(self.target_dir, os.path.basename(script))
- if not self.force and not self._fileop.newer(script, outname):
- logger.debug('not copying %s (up-to-date)', script)
- return
-
- # Always open the file, but ignore failures in dry-run mode --
- # that way, we'll get accurate feedback if we can read the
- # script.
- try:
- f = open(script, 'rb')
- except IOError: # pragma: no cover
- if not self.dry_run:
- raise
- f = None
- else:
- first_line = f.readline()
- if not first_line: # pragma: no cover
- logger.warning('%s: %s is an empty file (skipping)',
- self.get_command_name(), script)
- return
-
- match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
- if match:
- adjust = True
- post_interp = match.group(1) or b''
-
- if not adjust:
- if f:
- f.close()
- self._fileop.copy_file(script, outname)
- if self.set_mode:
- self._fileop.set_executable_mode([outname])
- filenames.append(outname)
- else:
- logger.info('copying and adjusting %s -> %s', script,
- self.target_dir)
- if not self._fileop.dry_run:
- encoding, lines = detect_encoding(f.readline)
- f.seek(0)
- shebang = self._get_shebang(encoding, post_interp)
- if b'pythonw' in first_line: # pragma: no cover
- ext = 'pyw'
- else:
- ext = 'py'
- n = os.path.basename(outname)
- self._write_script([n], shebang, f.read(), filenames, ext)
- if f:
- f.close()
-
- @property
- def dry_run(self):
- return self._fileop.dry_run
-
- @dry_run.setter
- def dry_run(self, value):
- self._fileop.dry_run = value
-
- if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
- # Executable launcher support.
- # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
-
- def _get_launcher(self, kind):
- if struct.calcsize('P') == 8: # 64-bit
- bits = '64'
- else:
- bits = '32'
- name = '%s%s.exe' % (kind, bits)
- # Issue 31: don't hardcode an absolute package name, but
- # determine it relative to the current package
- distlib_package = __name__.rsplit('.', 1)[0]
- result = finder(distlib_package).find(name).bytes
- return result
-
- # Public API follows
-
- def make(self, specification, options=None):
- """
- Make a script.
-
- :param specification: The specification, which is either a valid export
- entry specification (to make a script from a
- callable) or a filename (to make a script by
- copying from a source location).
- :param options: A dictionary of options controlling script generation.
- :return: A list of all absolute pathnames written to.
- """
- filenames = []
- entry = get_export_entry(specification)
- if entry is None:
- self._copy_script(specification, filenames)
- else:
- self._make_script(entry, filenames, options=options)
- return filenames
-
- def make_multiple(self, specifications, options=None):
- """
- Take a list of specifications and make scripts from them.
- :param specifications: A list of specifications.
- :return: A list of all absolute pathnames written to.
- """
- filenames = []
- for specification in specifications:
- filenames.extend(self.make(specification, options))
- return filenames
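
For context, ScriptMaker above is driven through make()/make_multiple().
A minimal sketch, assuming distlib is importable; the export entry
'shell = mypkg.cli:main' and the 'bin' target directory are hypothetical:

from distlib.scripts import ScriptMaker

maker = ScriptMaker(source_dir=None, target_dir='bin')
maker.clobber = True              # overwrite existing scripts
maker.variants = set(('',))       # write just 'shell', not 'shell-X.Y'
written = maker.make('shell = mypkg.cli:main')  # a valid export entry
print(written)                    # absolute paths of everything written
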
diff --git a/env/Lib/site-packages/pip/_vendor/distlib/t32.exe b/env/Lib/site-packages/pip/_vendor/distlib/t32.exe
deleted file mode 100644
index 836211d848ec0ba46667d33cfade3348888062ae..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 89088
zXNJfSnMy>#&glLNz}3}=7oRN=Y7fkWBe#KPwhYs@Q!CvuxWJLhH8KugTDq-6*#IN6
zycYko<|QYZE5B4$&VV9@Y^n75;_g;IduCRCj|&+kGdp&qDlPJR>2oh?}?J!-x=mgJX{?z%&zExS3Y@d~t^x
z4!ZrIbSv$7d1b8Xc50s*rIM3?iwz#f*24-SSnJ`G6eHJCIUK1qSx@O=I8gajp3bg2
z3wRu_2er?v4b*}qygc!~Q&1Ac9Z1)5T1J|+Y`#I9rsb$*;*9t)j3)Pf8k#d`Xl}u0
z!!8;#upXs~ijk+$B6Fmbz~S-!uK=OF=I$-x10foz01!MO*Iw8%5*~|>QK#X@lE8F0
zRoaDYZ+WxJTi)1jjD$PSA90U9&r$>@yJ%zzpwWjs=G@&lmx|+X7ETLKySUjyacxDM
zA;7^(yl*v{9VY{6v+D4Cr4AiZ=?bJNPtk-x-OZZQkfpjWr^?S2cZ{@sV0+W7zk>RY
zv23Sa4QDT_Rr598EL*!}JE1V2wW`b3-nfe*`l)3*|GO=_orIXLwd`h!8``pY)G|6*
z)=c(i&T;AonIfY!43iBT(yUX-W_h%S`C#8M!oo+r?M-SLZU&`BeQR@`21BchxqAbF
z&=PDfqtPBX>o1B%D}c7Bl^e4AwN`&1=_AQ!=3&nVV$)BOvxxU9n$hl$Hch9@=jvL?
zY1>YKo)0#?rYC>r&K%l^xk=*|Sq*pV@9Mrz_?_s40f
zJVm1yT#)9(+4vQ{h6wU%WfzB$E`?iR37>*+um1R##NJ6BXP+Uc7VEQ<)AI{_2K6ga
z+=iIB7h6}}F%2`ua5IcT_6weW9OeHtf=U554svR|B1L>sEF($H{l;?kpAYlf$XubP
zX_K%Q;CvHH-idW)URKS~QFUm``Vih{S>?@7f)x*dvA3W3*R1l^knt)%692mI;hhQ8
zJ7op=F3=_E@d1L=o^$vfR-$~H#L-a@m0P=D-h@5v
zXf}c>R@sH?Y`=Uu)xdK#Oah$Qun!5Zxhq@LJJ;#O8LUROKsZYOKBO$%CF8WD>$6}m
zMy6RCiy=6+2nH@@eT}WcrXIf+q0BPm9A)+;IK+8v+ibQFu>`v5k7PQ4dno$UjMOh!
z{R&w;O>?UDC1#c=PBb9v;En?cee?-vl`*jN9Tz8^&_guf#889tL@&5salNLnVt
zBafU2QXY9X4}-9SpEe+myhs0l`S8Exkq5h`0gpWH_@r(>Obtjn$dS?bpkLl^v$zjL
z43R0XJQYzxyz*Xbh+$io^vctszX#+Uu}tyBCPydY!;V&FUv4@rVu5KohTVBMBRIrU
zZ|#E+$6_g8J?_V+a>tDnrOA>$MSC(s2A2>67xm>a2GL!Zmh=|Ik@)KVZ1C
z9?7ODXT9f9S8~=PYmB55*f+fb|ADjK<5Ph&<*Y{>joSXto%Qaa;?8o`n`t`hO;0-T
zpo{;3v)(jnirMw?DF~nCt@jvosloVg0wHfbx-k$AioE~etw+BCtt8;B_dF^}-g+Cz
zTkjI?t@l@?B|WZ8T9`cUpLpu+8423Qa4>!PpL*)02=uRe>irq?wDoKA5?Xa%JQIyb
zc9QAemvmY?hr`}a(G~w=u6iSWHN;hqI6djRQm%Sb(!cJiM~fu3k~B-c>8eMl{(G)^
z9Nj;1)k^_vBke5w{@}~^Ev|aRNK6lDHA)M2(oIiFy6G*$62w=E-RKds6FMRUcM-->
za?lKQ)0+g+g!^$DyAN%Pg$4qK)@cH`B
z9rd^m+P~zew{QgJ6*=me17pBZk7)JJ9rbR(&`LV$E&Yb0p7&b(gfZ-9psRx>@{9j>
zPd%E4|BIe_)belj)MLBQ(dY8iBgyYvo_b@xO!1Ykd+Jd+XM5^tUqk+nJoU)yj(h5H
z=e(4sUNd63r(XMo=ttu0R4tnaXg~6J#i*S9@Q;#y7?WqTwcbwRq
z%Ttdx>KmSVzl9OOSyWTFpCb3vBaXzzr~r71g18F)>z;a|Ibq$s@X}iaawdH>u~Xnq
zn;Bvxiu#;G#QvJ^-oO2T_J{ZhiZeg(3W@~gj7A>Mv
z6e#>*+zXgmz!>h^;nz#!zMXrLaLWham<2)oAh&Lc&g9ljxYO9YZwFUyEV#n1
ze1!XUaCVAa_gOnyT&FqY;!658d5ka>x6v-?)JLvw$qk$
zvOAH^e|+vwR7CP9AXP>%|3DHu{s^e?!8dKPaPyGGu5@~9+Ae((
zf`v2*zjq~_r!ZV5Rw3c%joWtG?XU>gIf6g!ka*g5lGMUa6ty01ci=Xn<>RXT>~@^s
z;`Wjq`?OYdqn$3Q+30Nj$Th4ry3zIQv(LU1qXWmi@ckKA^yVw%)zp$}ftK%j`DFC|
z@%Y>=@HhwJ7Ro2N^9RmL8oS3qY#mhYYi>%T
zgHkX2=uWgmJ_Bfbgdz%X4p|(1>=+`%7x#%T+C2N#k1A^Q`lAnJQ+l{yG5ZCMO;N>`
zxfbYN;WPFwPBzv#qme#jLV13aO`y0l4d>g<*xS30R3RR*nA6n*$<@eI#XJ`ib1F*$
zV2K_C;8#j;pivyCkm$DAN5zombxy;A^zp8i&1r;IA5QMqc@$MZQ9#tZIh_cmXEZ;`
z1Ju@6^u;C_J0e{`^K(?#z5ik!)*hyqXj2YlvK@%tSfHF4y*{g9LC$zrbWwqxt(+dc
z-btSY(M6rjun}#?k3@Uks%tlOPRrPZ3fDO~Lyj*nSN%T)FPrfuxNKqzzd3Z?8XWFy
zS}jQDY3oB!3^;Jy{imPN=x%2r0Jp)J;xzOF<~&;;!p{kMrt|AXufj3o7@UX?_*L}N
zD3Ndx>t`r3xP^e!c_EEMBO5@6dc|O52>K5-xeEovXgZm*45n?cd9NZXwI1l0)bYhQ
z8^!O%$P+r!C